repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
libos-nuse/net-next-nuse | drivers/media/rc/ir-lirc-codec.c | 129 | 10015 | /* ir-lirc-codec.c - rc-core to classic lirc interface bridge
*
* Copyright (C) 2010 by Jarod Wilson <jarod@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <media/lirc.h>
#include <media/lirc_dev.h>
#include <media/rc-core.h>
#include "rc-core-priv.h"
#define LIRCBUF_SIZE 256
/**
* ir_lirc_decode() - Send raw IR data to lirc_dev to be relayed to the
* lircd userspace daemon for decoding.
* @dev: the struct rc_dev descriptor of the device
* @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the lirc interfaces aren't wired up.
*/
static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct lirc_codec *lirc = &dev->raw->lirc;
int sample;
if (!dev->raw->lirc.drv || !dev->raw->lirc.drv->rbuf)
return -EINVAL;
/* Packet start */
if (ev.reset) {
/* Userspace expects a long space event before the start of
* the signal to use as a sync. This may be done with repeat
* packets and normal samples. But if a reset has been sent
* then we assume that a long time has passed, so we send a
* space with the maximum time value. */
sample = LIRC_SPACE(LIRC_VALUE_MASK);
IR_dprintk(2, "delivering reset sync space to lirc_dev\n");
/* Carrier reports */
} else if (ev.carrier_report) {
sample = LIRC_FREQUENCY(ev.carrier);
IR_dprintk(2, "carrier report (freq: %d)\n", sample);
/* Packet end */
} else if (ev.timeout) {
if (lirc->gap)
return 0;
lirc->gap_start = ktime_get();
lirc->gap = true;
lirc->gap_duration = ev.duration;
if (!lirc->send_timeout_reports)
return 0;
sample = LIRC_TIMEOUT(ev.duration / 1000);
IR_dprintk(2, "timeout report (duration: %d)\n", sample);
/* Normal sample */
} else {
if (lirc->gap) {
int gap_sample;
lirc->gap_duration += ktime_to_ns(ktime_sub(ktime_get(),
lirc->gap_start));
/* Convert to us and cap by LIRC_VALUE_MASK */
do_div(lirc->gap_duration, 1000);
lirc->gap_duration = min(lirc->gap_duration,
(u64)LIRC_VALUE_MASK);
gap_sample = LIRC_SPACE(lirc->gap_duration);
lirc_buffer_write(dev->raw->lirc.drv->rbuf,
(unsigned char *) &gap_sample);
lirc->gap = false;
}
sample = ev.pulse ? LIRC_PULSE(ev.duration / 1000) :
LIRC_SPACE(ev.duration / 1000);
IR_dprintk(2, "delivering %uus %s to lirc_dev\n",
TO_US(ev.duration), TO_STR(ev.pulse));
}
lirc_buffer_write(dev->raw->lirc.drv->rbuf,
(unsigned char *) &sample);
wake_up(&dev->raw->lirc.drv->rbuf->wait_poll);
return 0;
}
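/*
 * Illustrative sketch, not part of the original file: how a userspace
 * reader of the lirc chardev would unpack the mode2 samples produced by
 * ir_lirc_decode() above. The LIRC_* constants come from the userspace
 * <linux/lirc.h>; the device path is an assumption. Guarded out since
 * this is userspace code.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/lirc.h>
static void dump_mode2_samples(void)
{
    unsigned int sample;
    int fd = open("/dev/lirc0", O_RDONLY); /* assumed device node */
    if (fd < 0)
        return;
    while (read(fd, &sample, sizeof(sample)) == sizeof(sample)) {
        unsigned int value = sample & LIRC_VALUE_MASK; /* duration in us */
        switch (sample & LIRC_MODE2_MASK) {
        case LIRC_MODE2_PULSE:
            printf("pulse %u\n", value);
            break;
        case LIRC_MODE2_SPACE:
            printf("space %u\n", value);
            break;
        case LIRC_MODE2_TIMEOUT:
            printf("timeout %u\n", value);
            break;
        case LIRC_MODE2_FREQUENCY:
            printf("carrier %u Hz\n", value); /* from carrier reports */
            break;
        }
    }
    close(fd);
}
#endif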
static ssize_t ir_lirc_transmit_ir(struct file *file, const char __user *buf,
size_t n, loff_t *ppos)
{
struct lirc_codec *lirc;
struct rc_dev *dev;
unsigned int *txbuf; /* buffer with values to transmit */
ssize_t ret = -EINVAL;
size_t count;
ktime_t start;
s64 towait;
unsigned int duration = 0; /* signal duration in us */
int i;
start = ktime_get();
lirc = lirc_get_pdata(file);
if (!lirc)
return -EFAULT;
if (n < sizeof(unsigned) || n % sizeof(unsigned))
return -EINVAL;
count = n / sizeof(unsigned);
if (count > LIRCBUF_SIZE || count % 2 == 0)
return -EINVAL;
txbuf = memdup_user(buf, n);
if (IS_ERR(txbuf))
return PTR_ERR(txbuf);
dev = lirc->dev;
if (!dev) {
ret = -EFAULT;
goto out;
}
if (!dev->tx_ir) {
ret = -ENOSYS;
goto out;
}
for (i = 0; i < count; i++) {
if (txbuf[i] > IR_MAX_DURATION / 1000 - duration || !txbuf[i]) {
ret = -EINVAL;
goto out;
}
duration += txbuf[i];
}
ret = dev->tx_ir(dev, txbuf, count);
if (ret < 0)
goto out;
for (duration = i = 0; i < ret; i++)
duration += txbuf[i];
ret *= sizeof(unsigned int);
/*
* The lircd gap calculation expects the write function to
* wait for the actual IR signal to be transmitted before
* returning.
*/
towait = ktime_us_delta(ktime_add_us(start, duration), ktime_get());
if (towait > 0) {
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(usecs_to_jiffies(towait));
}
out:
kfree(txbuf);
return ret;
}
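/*
 * Illustrative sketch, not part of the original file: feeding the write()
 * path above from userspace. The buffer holds alternating pulse/space
 * durations in microseconds and must contain an odd number of entries
 * (a signal starts and ends with a pulse); the durations and device path
 * are assumptions. Guarded out since this is userspace code.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
static void send_one_burst(void)
{
    /* pulse, space, pulse -- odd count, values in microseconds */
    unsigned int txbuf[] = { 9000, 4500, 560 };
    int fd = open("/dev/lirc0", O_WRONLY); /* assumed device node */
    if (fd < 0)
        return;
    /* the write blocks until the IR signal has actually been sent */
    write(fd, txbuf, sizeof(txbuf));
    close(fd);
}
#endif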
static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg)
{
struct lirc_codec *lirc;
struct rc_dev *dev;
u32 __user *argp = (u32 __user *)(arg);
int ret = 0;
__u32 val = 0, tmp;
lirc = lirc_get_pdata(filep);
if (!lirc)
return -EFAULT;
dev = lirc->dev;
if (!dev)
return -EFAULT;
if (_IOC_DIR(cmd) & _IOC_WRITE) {
ret = get_user(val, argp);
if (ret)
return ret;
}
switch (cmd) {
/* legacy support */
case LIRC_GET_SEND_MODE:
val = LIRC_CAN_SEND_PULSE & LIRC_CAN_SEND_MASK;
break;
case LIRC_SET_SEND_MODE:
if (val != (LIRC_MODE_PULSE & LIRC_CAN_SEND_MASK))
return -EINVAL;
return 0;
/* TX settings */
case LIRC_SET_TRANSMITTER_MASK:
if (!dev->s_tx_mask)
return -ENOSYS;
return dev->s_tx_mask(dev, val);
case LIRC_SET_SEND_CARRIER:
if (!dev->s_tx_carrier)
return -ENOSYS;
return dev->s_tx_carrier(dev, val);
case LIRC_SET_SEND_DUTY_CYCLE:
if (!dev->s_tx_duty_cycle)
return -ENOSYS;
if (val <= 0 || val >= 100)
return -EINVAL;
return dev->s_tx_duty_cycle(dev, val);
/* RX settings */
case LIRC_SET_REC_CARRIER:
if (!dev->s_rx_carrier_range)
return -ENOSYS;
if (val <= 0)
return -EINVAL;
return dev->s_rx_carrier_range(dev,
dev->raw->lirc.carrier_low,
val);
case LIRC_SET_REC_CARRIER_RANGE:
if (val <= 0)
return -EINVAL;
dev->raw->lirc.carrier_low = val;
return 0;
case LIRC_GET_REC_RESOLUTION:
val = dev->rx_resolution;
break;
case LIRC_SET_WIDEBAND_RECEIVER:
if (!dev->s_learning_mode)
return -ENOSYS;
return dev->s_learning_mode(dev, !!val);
case LIRC_SET_MEASURE_CARRIER_MODE:
if (!dev->s_carrier_report)
return -ENOSYS;
return dev->s_carrier_report(dev, !!val);
/* Generic timeout support */
case LIRC_GET_MIN_TIMEOUT:
if (!dev->max_timeout)
return -ENOSYS;
val = dev->min_timeout / 1000;
break;
case LIRC_GET_MAX_TIMEOUT:
if (!dev->max_timeout)
return -ENOSYS;
val = dev->max_timeout / 1000;
break;
case LIRC_SET_REC_TIMEOUT:
if (!dev->max_timeout)
return -ENOSYS;
tmp = val * 1000;
if (tmp < dev->min_timeout ||
tmp > dev->max_timeout)
return -EINVAL;
dev->timeout = tmp;
break;
case LIRC_SET_REC_TIMEOUT_REPORTS:
lirc->send_timeout_reports = !!val;
break;
default:
return lirc_dev_fop_ioctl(filep, cmd, arg);
}
if (_IOC_DIR(cmd) & _IOC_READ)
ret = put_user(val, argp);
return ret;
}
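/*
 * Illustrative sketch, not part of the original file: exercising one of
 * the ioctls handled above from userspace. LIRC_SET_SEND_CARRIER is a
 * real lirc ioctl; the 38 kHz value and device path are assumptions.
 * Guarded out since this is userspace code.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/lirc.h>
static void set_tx_carrier(void)
{
    __u32 carrier = 38000; /* a common IR carrier frequency, in Hz */
    int fd = open("/dev/lirc0", O_RDWR); /* assumed device node */
    if (fd < 0)
        return;
    /* routed to dev->s_tx_carrier() by ir_lirc_ioctl() above */
    ioctl(fd, LIRC_SET_SEND_CARRIER, &carrier);
    close(fd);
}
#endif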
static int ir_lirc_open(void *data)
{
return 0;
}
static void ir_lirc_close(void *data)
{
return;
}
static const struct file_operations lirc_fops = {
.owner = THIS_MODULE,
.write = ir_lirc_transmit_ir,
.unlocked_ioctl = ir_lirc_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ir_lirc_ioctl,
#endif
.read = lirc_dev_fop_read,
.poll = lirc_dev_fop_poll,
.open = lirc_dev_fop_open,
.release = lirc_dev_fop_close,
.llseek = no_llseek,
};
static int ir_lirc_register(struct rc_dev *dev)
{
struct lirc_driver *drv;
struct lirc_buffer *rbuf;
int rc = -ENOMEM;
unsigned long features;
drv = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
if (!drv)
return rc;
rbuf = kzalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
if (!rbuf)
goto rbuf_alloc_failed;
rc = lirc_buffer_init(rbuf, sizeof(int), LIRCBUF_SIZE);
if (rc)
goto rbuf_init_failed;
features = LIRC_CAN_REC_MODE2;
if (dev->tx_ir) {
features |= LIRC_CAN_SEND_PULSE;
if (dev->s_tx_mask)
features |= LIRC_CAN_SET_TRANSMITTER_MASK;
if (dev->s_tx_carrier)
features |= LIRC_CAN_SET_SEND_CARRIER;
if (dev->s_tx_duty_cycle)
features |= LIRC_CAN_SET_SEND_DUTY_CYCLE;
}
if (dev->s_rx_carrier_range)
features |= LIRC_CAN_SET_REC_CARRIER |
LIRC_CAN_SET_REC_CARRIER_RANGE;
if (dev->s_learning_mode)
features |= LIRC_CAN_USE_WIDEBAND_RECEIVER;
if (dev->s_carrier_report)
features |= LIRC_CAN_MEASURE_CARRIER;
if (dev->max_timeout)
features |= LIRC_CAN_SET_REC_TIMEOUT;
snprintf(drv->name, sizeof(drv->name), "ir-lirc-codec (%s)",
dev->driver_name);
drv->minor = -1;
drv->features = features;
drv->data = &dev->raw->lirc;
drv->rbuf = rbuf;
drv->set_use_inc = &ir_lirc_open;
drv->set_use_dec = &ir_lirc_close;
drv->code_length = sizeof(struct ir_raw_event) * 8;
drv->fops = &lirc_fops;
drv->dev = &dev->dev;
drv->rdev = dev;
drv->owner = THIS_MODULE;
drv->minor = lirc_register_driver(drv);
if (drv->minor < 0) {
rc = -ENODEV;
goto lirc_register_failed;
}
dev->raw->lirc.drv = drv;
dev->raw->lirc.dev = dev;
return 0;
lirc_register_failed:
rbuf_init_failed:
kfree(rbuf);
rbuf_alloc_failed:
kfree(drv);
return rc;
}
static int ir_lirc_unregister(struct rc_dev *dev)
{
struct lirc_codec *lirc = &dev->raw->lirc;
lirc_unregister_driver(lirc->drv->minor);
lirc_buffer_free(lirc->drv->rbuf);
kfree(lirc->drv->rbuf);
kfree(lirc->drv);
return 0;
}
static struct ir_raw_handler lirc_handler = {
.protocols = 0,
.decode = ir_lirc_decode,
.raw_register = ir_lirc_register,
.raw_unregister = ir_lirc_unregister,
};
static int __init ir_lirc_codec_init(void)
{
ir_raw_handler_register(&lirc_handler);
printk(KERN_INFO "IR LIRC bridge handler initialized\n");
return 0;
}
static void __exit ir_lirc_codec_exit(void)
{
ir_raw_handler_unregister(&lirc_handler);
}
module_init(ir_lirc_codec_init);
module_exit(ir_lirc_codec_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("LIRC IR handler bridge");
| gpl-2.0 |
ChiefzReloaded/lge-kernel-startablet-new | sound/soc/sh/fsi-ak4642.c | 129 | 1878 | /*
* FSI-AK464x sound support for ms7724se
*
* Copyright (C) 2009 Renesas Solutions Corp.
* Kuninori Morimoto <morimoto.kuninori@renesas.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/platform_device.h>
#include <sound/sh_fsi.h>
#include <../sound/soc/codecs/ak4642.h>
static int fsi_ak4642_dai_init(struct snd_soc_codec *codec)
{
int ret;
ret = snd_soc_dai_set_fmt(&ak4642_dai, SND_SOC_DAIFMT_CBM_CFM);
if (ret < 0)
return ret;
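/* 11289600 Hz = 256 * 44.1 kHz, presumably the codec's master clock rate */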
ret = snd_soc_dai_set_sysclk(&ak4642_dai, 0, 11289600, 0);
return ret;
}
static struct snd_soc_dai_link fsi_dai_link = {
.name = "AK4642",
.stream_name = "AK4642",
.cpu_dai = &fsi_soc_dai[FSI_PORT_A],
.codec_dai = &ak4642_dai,
.init = fsi_ak4642_dai_init,
.ops = NULL,
};
static struct snd_soc_card fsi_soc_card = {
.name = "FSI",
.platform = &fsi_soc_platform,
.dai_link = &fsi_dai_link,
.num_links = 1,
};
static struct snd_soc_device fsi_snd_devdata = {
.card = &fsi_soc_card,
.codec_dev = &soc_codec_dev_ak4642,
};
static struct platform_device *fsi_snd_device;
static int __init fsi_ak4642_init(void)
{
int ret = -ENOMEM;
fsi_snd_device = platform_device_alloc("soc-audio", FSI_PORT_A);
if (!fsi_snd_device)
goto out;
platform_set_drvdata(fsi_snd_device,
&fsi_snd_devdata);
fsi_snd_devdata.dev = &fsi_snd_device->dev;
ret = platform_device_add(fsi_snd_device);
if (ret)
platform_device_put(fsi_snd_device);
out:
return ret;
}
static void __exit fsi_ak4642_exit(void)
{
platform_device_unregister(fsi_snd_device);
}
module_init(fsi_ak4642_init);
module_exit(fsi_ak4642_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic SH4 FSI-AK4642 sound card");
MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
| gpl-2.0 |
tvall43/odroid-w_kernel | fs/splice.c | 129 | 46932 | /*
* "splice": joining two ropes together by interweaving their strands.
*
* This is the "extended pipe" functionality, where a pipe is used as
* an arbitrary in-memory buffer. Think of a pipe as a small kernel
* buffer that you can use to transfer data from one end to the other.
*
* The traditional unix read/write is extended with a "splice()" operation
* that transfers data buffers to or from a pipe buffer.
*
* Named by Larry McVoy, original implementation from Linus, extended by
* Jens to support splicing to files, network, direct splicing, etc and
* fixing lots of bugs.
*
* Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk>
* Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
* Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
*
*/
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/uio.h>
#include <linux/security.h>
#include <linux/gfp.h>
#include <linux/socket.h>
#include <linux/compat.h>
#include <linux/aio.h>
#include "internal.h"
/*
* Attempt to steal a page from a pipe buffer. This should perhaps go into
* a vm helper function, it's already simplified quite a bit by the
* addition of remove_mapping(). If success is returned, the caller may
* attempt to reuse this page for another destination.
*/
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct page *page = buf->page;
struct address_space *mapping;
lock_page(page);
mapping = page_mapping(page);
if (mapping) {
WARN_ON(!PageUptodate(page));
/*
* At least for ext2 with nobh option, we need to wait on
* writeback completing on this page, since we'll remove it
* from the pagecache. Otherwise truncate won't wait on the
* page, allowing the disk blocks to be reused by someone else
* before we actually wrote our data to them. fs corruption
* ensues.
*/
wait_on_page_writeback(page);
if (page_has_private(page) &&
!try_to_release_page(page, GFP_KERNEL))
goto out_unlock;
/*
* If we succeeded in removing the mapping, set LRU flag
* and return good.
*/
if (remove_mapping(mapping, page)) {
buf->flags |= PIPE_BUF_FLAG_LRU;
return 0;
}
}
/*
* Raced with truncate or failed to remove page from current
* address space, unlock and return failure.
*/
out_unlock:
unlock_page(page);
return 1;
}
static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
page_cache_release(buf->page);
buf->flags &= ~PIPE_BUF_FLAG_LRU;
}
/*
* Check whether the contents of buf is OK to access. Since the content
* is a page cache page, IO may be in flight.
*/
static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct page *page = buf->page;
int err;
if (!PageUptodate(page)) {
lock_page(page);
/*
* Page got truncated/unhashed. This will cause a 0-byte
* splice, if this is the first page.
*/
if (!page->mapping) {
err = -ENODATA;
goto error;
}
/*
* Uh oh, read-error from disk.
*/
if (!PageUptodate(page)) {
err = -EIO;
goto error;
}
/*
* Page is ok after all, we are done.
*/
unlock_page(page);
}
return 0;
error:
unlock_page(page);
return err;
}
const struct pipe_buf_operations page_cache_pipe_buf_ops = {
.can_merge = 0,
.confirm = page_cache_pipe_buf_confirm,
.release = page_cache_pipe_buf_release,
.steal = page_cache_pipe_buf_steal,
.get = generic_pipe_buf_get,
};
static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
return 1;
buf->flags |= PIPE_BUF_FLAG_LRU;
return generic_pipe_buf_steal(pipe, buf);
}
static const struct pipe_buf_operations user_page_pipe_buf_ops = {
.can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = page_cache_pipe_buf_release,
.steal = user_page_pipe_buf_steal,
.get = generic_pipe_buf_get,
};
static void wakeup_pipe_readers(struct pipe_inode_info *pipe)
{
smp_mb();
if (waitqueue_active(&pipe->wait))
wake_up_interruptible(&pipe->wait);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
/**
* splice_to_pipe - fill passed data into a pipe
* @pipe: pipe to fill
* @spd: data to fill
*
* Description:
* @spd contains a map of pages and len/offset tuples, along with
* the struct pipe_buf_operations associated with these pages. This
* function will link that data to the pipe.
*
*/
ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
struct splice_pipe_desc *spd)
{
unsigned int spd_pages = spd->nr_pages;
int ret, do_wakeup, page_nr;
ret = 0;
do_wakeup = 0;
page_nr = 0;
pipe_lock(pipe);
for (;;) {
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
if (!ret)
ret = -EPIPE;
break;
}
if (pipe->nrbufs < pipe->buffers) {
int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
struct pipe_buffer *buf = pipe->bufs + newbuf;
buf->page = spd->pages[page_nr];
buf->offset = spd->partial[page_nr].offset;
buf->len = spd->partial[page_nr].len;
buf->private = spd->partial[page_nr].private;
buf->ops = spd->ops;
if (spd->flags & SPLICE_F_GIFT)
buf->flags |= PIPE_BUF_FLAG_GIFT;
pipe->nrbufs++;
page_nr++;
ret += buf->len;
if (pipe->files)
do_wakeup = 1;
if (!--spd->nr_pages)
break;
if (pipe->nrbufs < pipe->buffers)
continue;
break;
}
if (spd->flags & SPLICE_F_NONBLOCK) {
if (!ret)
ret = -EAGAIN;
break;
}
if (signal_pending(current)) {
if (!ret)
ret = -ERESTARTSYS;
break;
}
if (do_wakeup) {
smp_mb();
if (waitqueue_active(&pipe->wait))
wake_up_interruptible_sync(&pipe->wait);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
do_wakeup = 0;
}
pipe->waiting_writers++;
pipe_wait(pipe);
pipe->waiting_writers--;
}
pipe_unlock(pipe);
if (do_wakeup)
wakeup_pipe_readers(pipe);
while (page_nr < spd_pages)
spd->spd_release(spd, page_nr++);
return ret;
}
void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
{
page_cache_release(spd->pages[i]);
}
/*
* Check if we need to grow the arrays holding pages and partial page
* descriptions.
*/
int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
{
unsigned int buffers = ACCESS_ONCE(pipe->buffers);
spd->nr_pages_max = buffers;
if (buffers <= PIPE_DEF_BUFFERS)
return 0;
spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);
if (spd->pages && spd->partial)
return 0;
kfree(spd->pages);
kfree(spd->partial);
return -ENOMEM;
}
void splice_shrink_spd(struct splice_pipe_desc *spd)
{
if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
return;
kfree(spd->pages);
kfree(spd->partial);
}
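/*
 * Illustrative sketch, not part of the original file: the grow/shrink
 * helpers above exist because userspace can enlarge a pipe beyond
 * PIPE_DEF_BUFFERS at any time with fcntl(F_SETPIPE_SZ). Guarded out
 * since this is userspace code.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
static int enlarge_pipe(int pipefd)
{
    /* ask for 1 MiB; the kernel rounds up to a power-of-two buffer count */
    return fcntl(pipefd, F_SETPIPE_SZ, 1024 * 1024);
}
#endif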
static int
__generic_file_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
struct address_space *mapping = in->f_mapping;
unsigned int loff, nr_pages, req_pages;
struct page *pages[PIPE_DEF_BUFFERS];
struct partial_page partial[PIPE_DEF_BUFFERS];
struct page *page;
pgoff_t index, end_index;
loff_t isize;
int error, page_nr;
struct splice_pipe_desc spd = {
.pages = pages,
.partial = partial,
.nr_pages_max = PIPE_DEF_BUFFERS,
.flags = flags,
.ops = &page_cache_pipe_buf_ops,
.spd_release = spd_release_page,
};
if (splice_grow_spd(pipe, &spd))
return -ENOMEM;
index = *ppos >> PAGE_CACHE_SHIFT;
loff = *ppos & ~PAGE_CACHE_MASK;
req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
nr_pages = min(req_pages, spd.nr_pages_max);
/*
* Lookup the (hopefully) full range of pages we need.
*/
spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, spd.pages);
index += spd.nr_pages;
/*
* If find_get_pages_contig() returned fewer pages than we needed,
* readahead/allocate the rest and fill in the holes.
*/
if (spd.nr_pages < nr_pages)
page_cache_sync_readahead(mapping, &in->f_ra, in,
index, req_pages - spd.nr_pages);
error = 0;
while (spd.nr_pages < nr_pages) {
/*
* Page could be there, find_get_pages_contig() breaks on
* the first hole.
*/
page = find_get_page(mapping, index);
if (!page) {
/*
* page didn't exist, allocate one.
*/
page = page_cache_alloc_cold(mapping);
if (!page)
break;
error = add_to_page_cache_lru(page, mapping, index,
GFP_KERNEL);
if (unlikely(error)) {
page_cache_release(page);
if (error == -EEXIST)
continue;
break;
}
/*
* add_to_page_cache() locks the page, unlock it
* to avoid convoluting the logic below even more.
*/
unlock_page(page);
}
spd.pages[spd.nr_pages++] = page;
index++;
}
/*
* Now loop over the map and see if we need to start IO on any
* pages, fill in the partial map, etc.
*/
index = *ppos >> PAGE_CACHE_SHIFT;
nr_pages = spd.nr_pages;
spd.nr_pages = 0;
for (page_nr = 0; page_nr < nr_pages; page_nr++) {
unsigned int this_len;
if (!len)
break;
/*
* this_len is the max we'll use from this page
*/
this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
page = spd.pages[page_nr];
if (PageReadahead(page))
page_cache_async_readahead(mapping, &in->f_ra, in,
page, index, req_pages - page_nr);
/*
* If the page isn't uptodate, we may need to start io on it
*/
if (!PageUptodate(page)) {
lock_page(page);
/*
* Page was truncated, or invalidated by the
* filesystem. Redo the find/create, but this time the
* page is kept locked, so there's no chance of another
* race with truncate/invalidate.
*/
if (!page->mapping) {
unlock_page(page);
page = find_or_create_page(mapping, index,
mapping_gfp_mask(mapping));
if (!page) {
error = -ENOMEM;
break;
}
page_cache_release(spd.pages[page_nr]);
spd.pages[page_nr] = page;
}
/*
* page was already under io and is now done, great
*/
if (PageUptodate(page)) {
unlock_page(page);
goto fill_it;
}
/*
* need to read in the page
*/
error = mapping->a_ops->readpage(in, page);
if (unlikely(error)) {
/*
* We really should re-lookup the page here,
* but it complicates things a lot. Instead
* lets just do what we already stored, and
* we'll get it the next time we are called.
*/
if (error == AOP_TRUNCATED_PAGE)
error = 0;
break;
}
}
fill_it:
/*
* i_size must be checked after PageUptodate.
*/
isize = i_size_read(mapping->host);
end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
if (unlikely(!isize || index > end_index))
break;
/*
* if this is the last page, see if we need to shrink
* the length and stop
*/
if (end_index == index) {
unsigned int plen;
/*
* max good bytes in this page
*/
plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
if (plen <= loff)
break;
/*
* force quit after adding this page
*/
this_len = min(this_len, plen - loff);
len = this_len;
}
spd.partial[page_nr].offset = loff;
spd.partial[page_nr].len = this_len;
len -= this_len;
loff = 0;
spd.nr_pages++;
index++;
}
/*
* Release any pages at the end, if we quit early. 'page_nr' is how far
* we got, 'nr_pages' is how many pages are in the map.
*/
while (page_nr < nr_pages)
page_cache_release(spd.pages[page_nr++]);
in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
if (spd.nr_pages)
error = splice_to_pipe(pipe, &spd);
splice_shrink_spd(&spd);
return error;
}
/**
* generic_file_splice_read - splice data from file to a pipe
* @in: file to splice from
* @ppos: position in @in
* @pipe: pipe to splice to
* @len: number of bytes to splice
* @flags: splice modifier flags
*
* Description:
* Will read pages from given file and fill them into a pipe. Can be
* used as long as the address_space operations for the source implements
* a readpage() hook.
*
*/
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
loff_t isize, left;
int ret;
isize = i_size_read(in->f_mapping->host);
if (unlikely(*ppos >= isize))
return 0;
left = isize - *ppos;
if (unlikely(left < len))
len = left;
ret = __generic_file_splice_read(in, ppos, pipe, len, flags);
if (ret > 0) {
*ppos += ret;
file_accessed(in);
}
return ret;
}
EXPORT_SYMBOL(generic_file_splice_read);
static const struct pipe_buf_operations default_pipe_buf_ops = {
.can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = generic_pipe_buf_release,
.steal = generic_pipe_buf_steal,
.get = generic_pipe_buf_get,
};
static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
return 1;
}
/* Pipe buffer operations for a socket and similar. */
const struct pipe_buf_operations nosteal_pipe_buf_ops = {
.can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = generic_pipe_buf_release,
.steal = generic_pipe_buf_nosteal,
.get = generic_pipe_buf_get,
};
EXPORT_SYMBOL(nosteal_pipe_buf_ops);
static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
unsigned long vlen, loff_t offset)
{
mm_segment_t old_fs;
loff_t pos = offset;
ssize_t res;
old_fs = get_fs();
set_fs(get_ds());
/* The cast to a user pointer is valid due to the set_fs() */
res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
set_fs(old_fs);
return res;
}
ssize_t kernel_write(struct file *file, const char *buf, size_t count,
loff_t pos)
{
mm_segment_t old_fs;
ssize_t res;
old_fs = get_fs();
set_fs(get_ds());
/* The cast to a user pointer is valid due to the set_fs() */
res = vfs_write(file, (__force const char __user *)buf, count, &pos);
set_fs(old_fs);
return res;
}
EXPORT_SYMBOL(kernel_write);
ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
unsigned int nr_pages;
unsigned int nr_freed;
size_t offset;
struct page *pages[PIPE_DEF_BUFFERS];
struct partial_page partial[PIPE_DEF_BUFFERS];
struct iovec *vec, __vec[PIPE_DEF_BUFFERS];
ssize_t res;
size_t this_len;
int error;
int i;
struct splice_pipe_desc spd = {
.pages = pages,
.partial = partial,
.nr_pages_max = PIPE_DEF_BUFFERS,
.flags = flags,
.ops = &default_pipe_buf_ops,
.spd_release = spd_release_page,
};
if (splice_grow_spd(pipe, &spd))
return -ENOMEM;
res = -ENOMEM;
vec = __vec;
if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
if (!vec)
goto shrink_ret;
}
offset = *ppos & ~PAGE_CACHE_MASK;
nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
struct page *page;
page = alloc_page(GFP_USER);
error = -ENOMEM;
if (!page)
goto err;
this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
vec[i].iov_base = (void __user *) page_address(page);
vec[i].iov_len = this_len;
spd.pages[i] = page;
spd.nr_pages++;
len -= this_len;
offset = 0;
}
res = kernel_readv(in, vec, spd.nr_pages, *ppos);
if (res < 0) {
error = res;
goto err;
}
error = 0;
if (!res)
goto err;
nr_freed = 0;
for (i = 0; i < spd.nr_pages; i++) {
this_len = min_t(size_t, vec[i].iov_len, res);
spd.partial[i].offset = 0;
spd.partial[i].len = this_len;
if (!this_len) {
__free_page(spd.pages[i]);
spd.pages[i] = NULL;
nr_freed++;
}
res -= this_len;
}
spd.nr_pages -= nr_freed;
res = splice_to_pipe(pipe, &spd);
if (res > 0)
*ppos += res;
shrink_ret:
if (vec != __vec)
kfree(vec);
splice_shrink_spd(&spd);
return res;
err:
for (i = 0; i < spd.nr_pages; i++)
__free_page(spd.pages[i]);
res = error;
goto shrink_ret;
}
EXPORT_SYMBOL(default_file_splice_read);
/*
* Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
* using sendpage(). Return the number of bytes sent.
*/
static int pipe_to_sendpage(struct pipe_inode_info *pipe,
struct pipe_buffer *buf, struct splice_desc *sd)
{
struct file *file = sd->u.file;
loff_t pos = sd->pos;
int more;
if (!likely(file->f_op->sendpage))
return -EINVAL;
more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
if (sd->len < sd->total_len && pipe->nrbufs > 1)
more |= MSG_SENDPAGE_NOTLAST;
return file->f_op->sendpage(file, buf->page, buf->offset,
sd->len, &pos, more);
}
static void wakeup_pipe_writers(struct pipe_inode_info *pipe)
{
smp_mb();
if (waitqueue_active(&pipe->wait))
wake_up_interruptible(&pipe->wait);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
/**
* splice_from_pipe_feed - feed available data from a pipe to a file
* @pipe: pipe to splice from
* @sd: information to @actor
* @actor: handler that splices the data
*
* Description:
* This function loops over the pipe and calls @actor to do the
* actual moving of a single struct pipe_buffer to the desired
* destination. It returns when there's no more buffers left in
* the pipe or if the requested number of bytes (@sd->total_len)
* have been copied. It returns a positive number (one) if the
* pipe needs to be filled with more data, zero if the required
* number of bytes have been copied and -errno on error.
*
* This, together with splice_from_pipe_{begin,end,next}, may be
* used to implement the functionality of __splice_from_pipe() when
* locking is required around copying the pipe buffers to the
* destination.
*/
static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
splice_actor *actor)
{
int ret;
while (pipe->nrbufs) {
struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
const struct pipe_buf_operations *ops = buf->ops;
sd->len = buf->len;
if (sd->len > sd->total_len)
sd->len = sd->total_len;
ret = buf->ops->confirm(pipe, buf);
if (unlikely(ret)) {
if (ret == -ENODATA)
ret = 0;
return ret;
}
ret = actor(pipe, buf, sd);
if (ret <= 0)
return ret;
buf->offset += ret;
buf->len -= ret;
sd->num_spliced += ret;
sd->len -= ret;
sd->pos += ret;
sd->total_len -= ret;
if (!buf->len) {
buf->ops = NULL;
ops->release(pipe, buf);
pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
pipe->nrbufs--;
if (pipe->files)
sd->need_wakeup = true;
}
if (!sd->total_len)
return 0;
}
return 1;
}
/**
* splice_from_pipe_next - wait for some data to splice from
* @pipe: pipe to splice from
* @sd: information about the splice operation
*
* Description:
* This function will wait for some data and return a positive
* value (one) if pipe buffers are available. It will return zero
* or -errno if no more data needs to be spliced.
*/
static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
while (!pipe->nrbufs) {
if (!pipe->writers)
return 0;
if (!pipe->waiting_writers && sd->num_spliced)
return 0;
if (sd->flags & SPLICE_F_NONBLOCK)
return -EAGAIN;
if (signal_pending(current))
return -ERESTARTSYS;
if (sd->need_wakeup) {
wakeup_pipe_writers(pipe);
sd->need_wakeup = false;
}
pipe_wait(pipe);
}
return 1;
}
/**
* splice_from_pipe_begin - start splicing from pipe
* @sd: information about the splice operation
*
* Description:
* This function should be called before a loop containing
* splice_from_pipe_next() and splice_from_pipe_feed() to
* initialize the necessary fields of @sd.
*/
static void splice_from_pipe_begin(struct splice_desc *sd)
{
sd->num_spliced = 0;
sd->need_wakeup = false;
}
/**
* splice_from_pipe_end - finish splicing from pipe
* @pipe: pipe to splice from
* @sd: information about the splice operation
*
* Description:
* This function will wake up pipe writers if necessary. It should
* be called after a loop containing splice_from_pipe_next() and
* splice_from_pipe_feed().
*/
static void splice_from_pipe_end(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
if (sd->need_wakeup)
wakeup_pipe_writers(pipe);
}
/**
* __splice_from_pipe - splice data from a pipe to given actor
* @pipe: pipe to splice from
* @sd: information to @actor
* @actor: handler that splices the data
*
* Description:
* This function does little more than loop over the pipe and call
* @actor to do the actual moving of a single struct pipe_buffer to
* the desired destination. See pipe_to_file, pipe_to_sendpage, or
* pipe_to_user.
*
*/
ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
splice_actor *actor)
{
int ret;
splice_from_pipe_begin(sd);
do {
ret = splice_from_pipe_next(pipe, sd);
if (ret > 0)
ret = splice_from_pipe_feed(pipe, sd, actor);
} while (ret > 0);
splice_from_pipe_end(pipe, sd);
return sd->num_spliced ? sd->num_spliced : ret;
}
EXPORT_SYMBOL(__splice_from_pipe);
/**
* splice_from_pipe - splice data from a pipe to a file
* @pipe: pipe to splice from
* @out: file to splice to
* @ppos: position in @out
* @len: how many bytes to splice
* @flags: splice modifier flags
* @actor: handler that splices the data
*
* Description:
* See __splice_from_pipe. This function locks the pipe inode,
* otherwise it's identical to __splice_from_pipe().
*
*/
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
loff_t *ppos, size_t len, unsigned int flags,
splice_actor *actor)
{
ssize_t ret;
struct splice_desc sd = {
.total_len = len,
.flags = flags,
.pos = *ppos,
.u.file = out,
};
pipe_lock(pipe);
ret = __splice_from_pipe(pipe, &sd, actor);
pipe_unlock(pipe);
return ret;
}
/**
* iter_file_splice_write - splice data from a pipe to a file
* @pipe: pipe info
* @out: file to write to
* @ppos: position in @out
* @len: number of bytes to splice
* @flags: splice modifier flags
*
* Description:
* Will either move or copy pages (determined by @flags options) from
* the given pipe inode to the given file.
* This one is ->write_iter-based.
*
*/
ssize_t
iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
loff_t *ppos, size_t len, unsigned int flags)
{
struct splice_desc sd = {
.total_len = len,
.flags = flags,
.pos = *ppos,
.u.file = out,
};
int nbufs = pipe->buffers;
struct bio_vec *array = kcalloc(nbufs, sizeof(struct bio_vec),
GFP_KERNEL);
ssize_t ret;
if (unlikely(!array))
return -ENOMEM;
pipe_lock(pipe);
splice_from_pipe_begin(&sd);
while (sd.total_len) {
struct iov_iter from;
size_t left;
int n, idx;
ret = splice_from_pipe_next(pipe, &sd);
if (ret <= 0)
break;
if (unlikely(nbufs < pipe->buffers)) {
kfree(array);
nbufs = pipe->buffers;
array = kcalloc(nbufs, sizeof(struct bio_vec),
GFP_KERNEL);
if (!array) {
ret = -ENOMEM;
break;
}
}
/* build the vector */
left = sd.total_len;
for (n = 0, idx = pipe->curbuf; left && n < pipe->nrbufs; n++, idx++) {
struct pipe_buffer *buf = pipe->bufs + idx;
size_t this_len = buf->len;
if (this_len > left)
this_len = left;
if (idx == pipe->buffers - 1)
idx = -1;
ret = buf->ops->confirm(pipe, buf);
if (unlikely(ret)) {
if (ret == -ENODATA)
ret = 0;
goto done;
}
array[n].bv_page = buf->page;
array[n].bv_len = this_len;
array[n].bv_offset = buf->offset;
left -= this_len;
}
iov_iter_bvec(&from, ITER_BVEC | WRITE, array, n,
sd.total_len - left);
ret = vfs_iter_write(out, &from, &sd.pos);
if (ret <= 0)
break;
sd.num_spliced += ret;
sd.total_len -= ret;
*ppos = sd.pos;
/* dismiss the fully eaten buffers, adjust the partial one */
while (ret) {
struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
if (ret >= buf->len) {
const struct pipe_buf_operations *ops = buf->ops;
ret -= buf->len;
buf->len = 0;
buf->ops = NULL;
ops->release(pipe, buf);
pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
pipe->nrbufs--;
if (pipe->files)
sd.need_wakeup = true;
} else {
buf->offset += ret;
buf->len -= ret;
ret = 0;
}
}
}
done:
kfree(array);
splice_from_pipe_end(pipe, &sd);
pipe_unlock(pipe);
if (sd.num_spliced)
ret = sd.num_spliced;
return ret;
}
EXPORT_SYMBOL(iter_file_splice_write);
static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
struct splice_desc *sd)
{
int ret;
void *data;
loff_t tmp = sd->pos;
data = kmap(buf->page);
ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp);
kunmap(buf->page);
return ret;
}
static ssize_t default_file_splice_write(struct pipe_inode_info *pipe,
struct file *out, loff_t *ppos,
size_t len, unsigned int flags)
{
ssize_t ret;
ret = splice_from_pipe(pipe, out, ppos, len, flags, write_pipe_buf);
if (ret > 0)
*ppos += ret;
return ret;
}
/**
* generic_splice_sendpage - splice data from a pipe to a socket
* @pipe: pipe to splice from
* @out: socket to write to
* @ppos: position in @out
* @len: number of bytes to splice
* @flags: splice modifier flags
*
* Description:
* Will send @len bytes from the pipe to a network socket. No data copying
* is involved.
*
*/
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
loff_t *ppos, size_t len, unsigned int flags)
{
return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}
EXPORT_SYMBOL(generic_splice_sendpage);
/*
* Attempt to initiate a splice from pipe to file.
*/
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
loff_t *ppos, size_t len, unsigned int flags)
{
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
loff_t *, size_t, unsigned int);
if (out->f_op->splice_write)
splice_write = out->f_op->splice_write;
else
splice_write = default_file_splice_write;
return splice_write(pipe, out, ppos, len, flags);
}
/*
* Attempt to initiate a splice from a file to a pipe.
*/
static long do_splice_to(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
ssize_t (*splice_read)(struct file *, loff_t *,
struct pipe_inode_info *, size_t, unsigned int);
int ret;
if (unlikely(!(in->f_mode & FMODE_READ)))
return -EBADF;
ret = rw_verify_area(READ, in, ppos, len);
if (unlikely(ret < 0))
return ret;
if (in->f_op->splice_read)
splice_read = in->f_op->splice_read;
else
splice_read = default_file_splice_read;
return splice_read(in, ppos, pipe, len, flags);
}
/**
* splice_direct_to_actor - splices data directly between two non-pipes
* @in: file to splice from
* @sd: actor information on where to splice to
* @actor: handles the data splicing
*
* Description:
* This is a special case helper to splice directly between two
* points, without requiring an explicit pipe. Internally an allocated
* pipe is cached in the process, and reused during the lifetime of
* that process.
*
*/
ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
splice_direct_actor *actor)
{
struct pipe_inode_info *pipe;
long ret, bytes;
umode_t i_mode;
size_t len;
int i, flags;
/*
* We require the input being a regular file, as we don't want to
* randomly drop data for e.g. socket -> socket splicing. Use the
* piped splicing for that!
*/
i_mode = file_inode(in)->i_mode;
if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
return -EINVAL;
/*
* neither in nor out is a pipe, setup an internal pipe attached to
* 'out' and transfer the wanted data from 'in' to 'out' through that
*/
pipe = current->splice_pipe;
if (unlikely(!pipe)) {
pipe = alloc_pipe_info();
if (!pipe)
return -ENOMEM;
/*
* We don't have an immediate reader, but we'll read the stuff
* out of the pipe right after the splice_to_pipe(). So set
* PIPE_READERS appropriately.
*/
pipe->readers = 1;
current->splice_pipe = pipe;
}
/*
* Do the splice.
*/
ret = 0;
bytes = 0;
len = sd->total_len;
flags = sd->flags;
/*
* Don't block on output, we have to drain the direct pipe.
*/
sd->flags &= ~SPLICE_F_NONBLOCK;
while (len) {
size_t read_len;
loff_t pos = sd->pos, prev_pos = pos;
ret = do_splice_to(in, &pos, pipe, len, flags);
if (unlikely(ret <= 0))
goto out_release;
read_len = ret;
sd->total_len = read_len;
/*
* NOTE: nonblocking mode only applies to the input. We
* must not do the output in nonblocking mode as then we
* could get stuck data in the internal pipe:
*/
ret = actor(pipe, sd);
if (unlikely(ret <= 0)) {
sd->pos = prev_pos;
goto out_release;
}
bytes += ret;
len -= ret;
sd->pos = pos;
if (ret < read_len) {
sd->pos = prev_pos + ret;
goto out_release;
}
}
done:
pipe->nrbufs = pipe->curbuf = 0;
file_accessed(in);
return bytes;
out_release:
/*
* If we did an incomplete transfer we must release
* the pipe buffers in question:
*/
for (i = 0; i < pipe->buffers; i++) {
struct pipe_buffer *buf = pipe->bufs + i;
if (buf->ops) {
buf->ops->release(pipe, buf);
buf->ops = NULL;
}
}
if (!bytes)
bytes = ret;
goto done;
}
EXPORT_SYMBOL(splice_direct_to_actor);
static int direct_splice_actor(struct pipe_inode_info *pipe,
struct splice_desc *sd)
{
struct file *file = sd->u.file;
return do_splice_from(pipe, file, sd->opos, sd->total_len,
sd->flags);
}
/**
* do_splice_direct - splices data directly between two files
* @in: file to splice from
* @ppos: input file offset
* @out: file to splice to
* @opos: output file offset
* @len: number of bytes to splice
* @flags: splice modifier flags
*
* Description:
* For use by do_sendfile(). splice can easily emulate sendfile, but
* doing it in the application would incur an extra system call
* (splice in + splice out, as compared to just sendfile()). So this helper
* can splice directly through a process-private pipe.
*
*/
long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
loff_t *opos, size_t len, unsigned int flags)
{
struct splice_desc sd = {
.len = len,
.total_len = len,
.flags = flags,
.pos = *ppos,
.u.file = out,
.opos = opos,
};
long ret;
if (unlikely(!(out->f_mode & FMODE_WRITE)))
return -EBADF;
if (unlikely(out->f_flags & O_APPEND))
return -EINVAL;
ret = rw_verify_area(WRITE, out, opos, len);
if (unlikely(ret < 0))
return ret;
ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
if (ret > 0)
*ppos = sd.pos;
return ret;
}
EXPORT_SYMBOL(do_splice_direct);
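/*
 * Illustrative sketch, not part of the original file: do_splice_direct()
 * is what backs sendfile(2) via do_sendfile(), so the userspace
 * equivalent of this in-kernel path is simply sendfile(). Guarded out
 * since this is userspace code.
 */
#if 0
#include <sys/sendfile.h>
static ssize_t copy_range(int out_fd, int in_fd, off_t offset, size_t count)
{
    /* ends up in do_sendfile() -> do_splice_direct() above */
    return sendfile(out_fd, in_fd, &offset, count);
}
#endif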
static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
struct pipe_inode_info *opipe,
size_t len, unsigned int flags);
/*
* Determine where to splice to/from.
*/
static long do_splice(struct file *in, loff_t __user *off_in,
struct file *out, loff_t __user *off_out,
size_t len, unsigned int flags)
{
struct pipe_inode_info *ipipe;
struct pipe_inode_info *opipe;
loff_t offset;
long ret;
ipipe = get_pipe_info(in);
opipe = get_pipe_info(out);
if (ipipe && opipe) {
if (off_in || off_out)
return -ESPIPE;
if (!(in->f_mode & FMODE_READ))
return -EBADF;
if (!(out->f_mode & FMODE_WRITE))
return -EBADF;
/* Splicing to self would be fun, but... */
if (ipipe == opipe)
return -EINVAL;
return splice_pipe_to_pipe(ipipe, opipe, len, flags);
}
if (ipipe) {
if (off_in)
return -ESPIPE;
if (off_out) {
if (!(out->f_mode & FMODE_PWRITE))
return -EINVAL;
if (copy_from_user(&offset, off_out, sizeof(loff_t)))
return -EFAULT;
} else {
offset = out->f_pos;
}
if (unlikely(!(out->f_mode & FMODE_WRITE)))
return -EBADF;
if (unlikely(out->f_flags & O_APPEND))
return -EINVAL;
ret = rw_verify_area(WRITE, out, &offset, len);
if (unlikely(ret < 0))
return ret;
file_start_write(out);
ret = do_splice_from(ipipe, out, &offset, len, flags);
file_end_write(out);
if (!off_out)
out->f_pos = offset;
else if (copy_to_user(off_out, &offset, sizeof(loff_t)))
ret = -EFAULT;
return ret;
}
if (opipe) {
if (off_out)
return -ESPIPE;
if (off_in) {
if (!(in->f_mode & FMODE_PREAD))
return -EINVAL;
if (copy_from_user(&offset, off_in, sizeof(loff_t)))
return -EFAULT;
} else {
offset = in->f_pos;
}
ret = do_splice_to(in, &offset, opipe, len, flags);
if (!off_in)
in->f_pos = offset;
else if (copy_to_user(off_in, &offset, sizeof(loff_t)))
ret = -EFAULT;
return ret;
}
return -EINVAL;
}
/*
* Map an iov into an array of pages and offset/length tuples. With the
* partial_page structure, we can map several non-contiguous ranges into
* one pages[] map instead of splitting that operation into pieces.
* Could easily be exported as a generic helper for other users, in which
* case one would probably want to add a 'max_nr_pages' parameter as well.
*/
static int get_iovec_page_array(const struct iovec __user *iov,
unsigned int nr_vecs, struct page **pages,
struct partial_page *partial, bool aligned,
unsigned int pipe_buffers)
{
int buffers = 0, error = 0;
while (nr_vecs) {
unsigned long off, npages;
struct iovec entry;
void __user *base;
size_t len;
int i;
error = -EFAULT;
if (copy_from_user(&entry, iov, sizeof(entry)))
break;
base = entry.iov_base;
len = entry.iov_len;
/*
* Sanity check this iovec. 0 read succeeds.
*/
error = 0;
if (unlikely(!len))
break;
error = -EFAULT;
if (!access_ok(VERIFY_READ, base, len))
break;
/*
* Get this base offset and number of pages, then map
* in the user pages.
*/
off = (unsigned long) base & ~PAGE_MASK;
/*
* If asked for alignment, the offset must be zero and the
* length a multiple of the PAGE_SIZE.
*/
error = -EINVAL;
if (aligned && (off || len & ~PAGE_MASK))
break;
npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (npages > pipe_buffers - buffers)
npages = pipe_buffers - buffers;
error = get_user_pages_fast((unsigned long)base, npages,
0, &pages[buffers]);
if (unlikely(error <= 0))
break;
/*
* Fill this contiguous range into the partial page map.
*/
for (i = 0; i < error; i++) {
const int plen = min_t(size_t, len, PAGE_SIZE - off);
partial[buffers].offset = off;
partial[buffers].len = plen;
off = 0;
len -= plen;
buffers++;
}
/*
* We didn't complete this iov, stop here since it probably
* means we have to move some of this into a pipe to
* be able to continue.
*/
if (len)
break;
/*
* Don't continue if we mapped fewer pages than we asked for,
* or if we mapped the max number of pages that we have
* room for.
*/
if (error < npages || buffers == pipe_buffers)
break;
nr_vecs--;
iov++;
}
if (buffers)
return buffers;
return error;
}
static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
struct splice_desc *sd)
{
int n = copy_page_to_iter(buf->page, buf->offset, sd->len, sd->u.data);
return n == sd->len ? n : -EFAULT;
}
/*
* For lack of a better implementation, implement vmsplice() to userspace
* as a simple copy of the pipe's pages to the user iov.
*/
static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
unsigned long nr_segs, unsigned int flags)
{
struct pipe_inode_info *pipe;
struct splice_desc sd;
long ret;
struct iovec iovstack[UIO_FASTIOV];
struct iovec *iov = iovstack;
struct iov_iter iter;
ssize_t count;
pipe = get_pipe_info(file);
if (!pipe)
return -EBADF;
ret = rw_copy_check_uvector(READ, uiov, nr_segs,
ARRAY_SIZE(iovstack), iovstack, &iov);
if (ret <= 0)
goto out;
count = ret;
iov_iter_init(&iter, READ, iov, nr_segs, count);
sd.len = 0;
sd.total_len = count;
sd.flags = flags;
sd.u.data = &iter;
sd.pos = 0;
pipe_lock(pipe);
ret = __splice_from_pipe(pipe, &sd, pipe_to_user);
pipe_unlock(pipe);
out:
if (iov != iovstack)
kfree(iov);
return ret;
}
/*
* vmsplice splices a user address range into a pipe. It can be thought of
* as splice-from-memory, where the regular splice is splice-from-file (or
* to file). In both cases the output is a pipe, naturally.
*/
static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
unsigned long nr_segs, unsigned int flags)
{
struct pipe_inode_info *pipe;
struct page *pages[PIPE_DEF_BUFFERS];
struct partial_page partial[PIPE_DEF_BUFFERS];
struct splice_pipe_desc spd = {
.pages = pages,
.partial = partial,
.nr_pages_max = PIPE_DEF_BUFFERS,
.flags = flags,
.ops = &user_page_pipe_buf_ops,
.spd_release = spd_release_page,
};
long ret;
pipe = get_pipe_info(file);
if (!pipe)
return -EBADF;
if (splice_grow_spd(pipe, &spd))
return -ENOMEM;
spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
spd.partial, false,
spd.nr_pages_max);
if (spd.nr_pages <= 0)
ret = spd.nr_pages;
else
ret = splice_to_pipe(pipe, &spd);
splice_shrink_spd(&spd);
return ret;
}
/*
* Note that vmsplice only really supports true splicing _from_ user memory
* to a pipe, not the other way around. Splicing from user memory is a simple
* operation that can be supported without any funky alignment restrictions
* or nasty vm tricks. We simply map in the user memory pages and fill them
* into a pipe. The reverse isn't quite as easy, though. There are two possible
* solutions for that:
*
* - memcpy() the data internally, at which point we might as well just
* do a regular read() on the buffer anyway.
* - Lots of nasty vm tricks, that are neither fast nor flexible (it
* has restrictions on both ends of the pipe).
*
* Currently we punt and implement it as a normal copy, see pipe_to_user().
*
*/
SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, iov,
unsigned long, nr_segs, unsigned int, flags)
{
struct fd f;
long error;
if (unlikely(nr_segs > UIO_MAXIOV))
return -EINVAL;
else if (unlikely(!nr_segs))
return 0;
error = -EBADF;
f = fdget(fd);
if (f.file) {
if (f.file->f_mode & FMODE_WRITE)
error = vmsplice_to_pipe(f.file, iov, nr_segs, flags);
else if (f.file->f_mode & FMODE_READ)
error = vmsplice_to_user(f.file, iov, nr_segs, flags);
fdput(f);
}
return error;
}
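/*
 * Illustrative sketch, not part of the original file: pushing a user
 * buffer into a pipe with vmsplice(2), which lands in vmsplice_to_pipe()
 * above. Guarded out since this is userspace code.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <sys/uio.h>
static void user_pages_to_pipe(int pipe_write_fd)
{
    static char payload[] = "hello";
    struct iovec iov = { .iov_base = payload, .iov_len = strlen(payload) };
    /* maps the user pages straight into the pipe, no copy on this side */
    vmsplice(pipe_write_fd, &iov, 1, 0);
}
#endif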
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(vmsplice, int, fd, const struct compat_iovec __user *, iov32,
unsigned int, nr_segs, unsigned int, flags)
{
unsigned i;
struct iovec __user *iov;
if (nr_segs > UIO_MAXIOV)
return -EINVAL;
iov = compat_alloc_user_space(nr_segs * sizeof(struct iovec));
for (i = 0; i < nr_segs; i++) {
struct compat_iovec v;
if (get_user(v.iov_base, &iov32[i].iov_base) ||
get_user(v.iov_len, &iov32[i].iov_len) ||
put_user(compat_ptr(v.iov_base), &iov[i].iov_base) ||
put_user(v.iov_len, &iov[i].iov_len))
return -EFAULT;
}
return sys_vmsplice(fd, iov, nr_segs, flags);
}
#endif
SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
int, fd_out, loff_t __user *, off_out,
size_t, len, unsigned int, flags)
{
struct fd in, out;
long error;
if (unlikely(!len))
return 0;
error = -EBADF;
in = fdget(fd_in);
if (in.file) {
if (in.file->f_mode & FMODE_READ) {
out = fdget(fd_out);
if (out.file) {
if (out.file->f_mode & FMODE_WRITE)
error = do_splice(in.file, off_in,
out.file, off_out,
len, flags);
fdput(out);
}
}
fdput(in);
}
return error;
}
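/*
 * Illustrative sketch, not part of the original file: the classic
 * file -> pipe -> socket relay that this syscall enables, with no
 * userspace copies. Short writes and error handling are elided.
 * Guarded out since this is userspace code.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
static int relay(int file_fd, int sock_fd, size_t len)
{
    int pipefd[2];
    ssize_t n;
    if (pipe(pipefd) < 0)
        return -1;
    while (len) {
        n = splice(file_fd, NULL, pipefd[1], NULL, len, SPLICE_F_MOVE);
        if (n <= 0)
            break;
        splice(pipefd[0], NULL, sock_fd, NULL, n, SPLICE_F_MOVE);
        len -= n;
    }
    close(pipefd[0]);
    close(pipefd[1]);
    return 0;
}
#endif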
/*
* Make sure there's data to read. Wait for input if we can, otherwise
* return an appropriate error.
*/
static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
int ret;
/*
* Check ->nrbufs without the inode lock first. This function
* is speculative anyway, so missing one is ok.
*/
if (pipe->nrbufs)
return 0;
ret = 0;
pipe_lock(pipe);
while (!pipe->nrbufs) {
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
if (!pipe->writers)
break;
if (!pipe->waiting_writers) {
if (flags & SPLICE_F_NONBLOCK) {
ret = -EAGAIN;
break;
}
}
pipe_wait(pipe);
}
pipe_unlock(pipe);
return ret;
}
/*
* Make sure there's writeable room. Wait for room if we can, otherwise
* return an appropriate error.
*/
static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
int ret;
/*
* Check ->nrbufs without the inode lock first. This function
* is speculative anyway, so missing one is ok.
*/
if (pipe->nrbufs < pipe->buffers)
return 0;
ret = 0;
pipe_lock(pipe);
while (pipe->nrbufs >= pipe->buffers) {
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
ret = -EPIPE;
break;
}
if (flags & SPLICE_F_NONBLOCK) {
ret = -EAGAIN;
break;
}
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
pipe->waiting_writers++;
pipe_wait(pipe);
pipe->waiting_writers--;
}
pipe_unlock(pipe);
return ret;
}
/*
* Splice contents of ipipe to opipe.
*/
static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
struct pipe_inode_info *opipe,
size_t len, unsigned int flags)
{
struct pipe_buffer *ibuf, *obuf;
int ret = 0, nbuf;
bool input_wakeup = false;
retry:
ret = ipipe_prep(ipipe, flags);
if (ret)
return ret;
ret = opipe_prep(opipe, flags);
if (ret)
return ret;
/*
* Potential ABBA deadlock, work around it by ordering lock
* grabbing by pipe info address. Otherwise two different processes
* could deadlock (one doing tee from A -> B, the other from B -> A).
*/
pipe_double_lock(ipipe, opipe);
do {
if (!opipe->readers) {
send_sig(SIGPIPE, current, 0);
if (!ret)
ret = -EPIPE;
break;
}
if (!ipipe->nrbufs && !ipipe->writers)
break;
/*
* Cannot make any progress, because either the input
* pipe is empty or the output pipe is full.
*/
if (!ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) {
/* Already processed some buffers, break */
if (ret)
break;
if (flags & SPLICE_F_NONBLOCK) {
ret = -EAGAIN;
break;
}
/*
* We raced with another reader/writer and haven't
* managed to process any buffers. A zero return
* value means EOF, so retry instead.
*/
pipe_unlock(ipipe);
pipe_unlock(opipe);
goto retry;
}
ibuf = ipipe->bufs + ipipe->curbuf;
nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);
obuf = opipe->bufs + nbuf;
if (len >= ibuf->len) {
/*
* Simply move the whole buffer from ipipe to opipe
*/
*obuf = *ibuf;
ibuf->ops = NULL;
opipe->nrbufs++;
ipipe->curbuf = (ipipe->curbuf + 1) & (ipipe->buffers - 1);
ipipe->nrbufs--;
input_wakeup = true;
} else {
/*
* Get a reference to this pipe buffer,
* so we can copy the contents over.
*/
ibuf->ops->get(ipipe, ibuf);
*obuf = *ibuf;
/*
* Don't inherit the gift flag, we need to
* prevent multiple steals of this page.
*/
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
obuf->len = len;
opipe->nrbufs++;
ibuf->offset += obuf->len;
ibuf->len -= obuf->len;
}
ret += obuf->len;
len -= obuf->len;
} while (len);
pipe_unlock(ipipe);
pipe_unlock(opipe);
/*
* If we put data in the output pipe, wakeup any potential readers.
*/
if (ret > 0)
wakeup_pipe_readers(opipe);
if (input_wakeup)
wakeup_pipe_writers(ipipe);
return ret;
}
/*
* Link contents of ipipe to opipe.
*/
static int link_pipe(struct pipe_inode_info *ipipe,
struct pipe_inode_info *opipe,
size_t len, unsigned int flags)
{
struct pipe_buffer *ibuf, *obuf;
int ret = 0, i = 0, nbuf;
/*
* Potential ABBA deadlock, work around it by ordering lock
* grabbing by pipe info address. Otherwise two different processes
* could deadlock (one doing tee from A -> B, the other from B -> A).
*/
pipe_double_lock(ipipe, opipe);
do {
if (!opipe->readers) {
send_sig(SIGPIPE, current, 0);
if (!ret)
ret = -EPIPE;
break;
}
/*
* If we have iterated all input buffers or ran out of
* output room, break.
*/
if (i >= ipipe->nrbufs || opipe->nrbufs >= opipe->buffers)
break;
ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (ipipe->buffers-1));
nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);
/*
* Get a reference to this pipe buffer,
* so we can copy the contents over.
*/
ibuf->ops->get(ipipe, ibuf);
obuf = opipe->bufs + nbuf;
*obuf = *ibuf;
/*
* Don't inherit the gift flag, we need to
* prevent multiple steals of this page.
*/
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
if (obuf->len > len)
obuf->len = len;
opipe->nrbufs++;
ret += obuf->len;
len -= obuf->len;
i++;
} while (len);
/*
* return -EAGAIN if we have the potential of some data in the
* future, otherwise just return 0
*/
if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
ret = -EAGAIN;
pipe_unlock(ipipe);
pipe_unlock(opipe);
/*
* If we put data in the output pipe, wakeup any potential readers.
*/
if (ret > 0)
wakeup_pipe_readers(opipe);
return ret;
}
/*
* This is a tee(1) implementation that works on pipes. It doesn't copy
* any data, it simply references the 'in' pages on the 'out' pipe.
* The 'flags' used are the SPLICE_F_* variants, currently the only
* applicable one is SPLICE_F_NONBLOCK.
*/
static long do_tee(struct file *in, struct file *out, size_t len,
unsigned int flags)
{
struct pipe_inode_info *ipipe = get_pipe_info(in);
struct pipe_inode_info *opipe = get_pipe_info(out);
int ret = -EINVAL;
/*
* Duplicate the contents of ipipe to opipe without actually
* copying the data.
*/
if (ipipe && opipe && ipipe != opipe) {
/*
* Keep going, unless we encounter an error. The ipipe/opipe
* ordering doesn't really matter.
*/
ret = ipipe_prep(ipipe, flags);
if (!ret) {
ret = opipe_prep(opipe, flags);
if (!ret)
ret = link_pipe(ipipe, opipe, len, flags);
}
}
return ret;
}
SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags)
{
struct fd in;
int error;
if (unlikely(!len))
return 0;
error = -EBADF;
in = fdget(fdin);
if (in.file) {
if (in.file->f_mode & FMODE_READ) {
struct fd out = fdget(fdout);
if (out.file) {
if (out.file->f_mode & FMODE_WRITE)
error = do_tee(in.file, out.file,
len, flags);
fdput(out);
}
}
fdput(in);
}
return error;
}
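/*
 * Illustrative sketch, not part of the original file: tee(2) duplicates
 * pipe contents without consuming them, so the same data can still be
 * splice()d onward from pipe_in afterwards. Guarded out since this is
 * userspace code.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
static ssize_t duplicate_pipe_data(int pipe_in, int pipe_out, size_t len)
{
    /* references up to len bytes on pipe_out; pipe_in keeps its data */
    return tee(pipe_in, pipe_out, len, SPLICE_F_NONBLOCK);
}
#endif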
| gpl-2.0 |
kingklick/kk-nexus-kernel | drivers/usb/misc/usbsevseg.c | 641 | 10924 | /*
* USB 7 Segment Driver
*
* Copyright (C) 2008 Harrison Metzger <harrisonmetz@gmail.com>
* Based on usbled.c by Greg Kroah-Hartman (greg@kroah.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/usb.h>
#define DRIVER_AUTHOR "Harrison Metzger <harrisonmetz@gmail.com>"
#define DRIVER_DESC "USB 7 Segment Driver"
#define VENDOR_ID 0x0fc5
#define PRODUCT_ID 0x1227
#define MAXLEN 6
/* table of devices that work with this driver */
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(VENDOR_ID, PRODUCT_ID) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
/* the different text display modes the device is capable of */
static char *display_textmodes[] = {"raw", "hex", "ascii", NULL};
struct usb_sevsegdev {
struct usb_device *udev;
struct usb_interface *intf;
u8 powered;
u8 mode_msb;
u8 mode_lsb;
u8 decimals[MAXLEN];
u8 textmode;
u8 text[MAXLEN];
u16 textlength;
u8 shadow_power; /* for PM */
u8 has_interface_pm;
};
/* sysfs_streq can't replace this completely:
* if the device was in hex mode and the user wanted a 0,
* string functions would treat that byte as the end of the string,
* so memory functions are used instead.
*/
inline size_t my_memlen(const char *buf, size_t count)
{
if (count > 0 && buf[count-1] == '\n')
return count - 1;
else
return count;
}
static void update_display_powered(struct usb_sevsegdev *mydev)
{
int rc;
if (mydev->powered && !mydev->has_interface_pm) {
rc = usb_autopm_get_interface(mydev->intf);
if (rc < 0)
return;
mydev->has_interface_pm = 1;
}
if (mydev->shadow_power != 1)
return;
rc = usb_control_msg(mydev->udev,
usb_sndctrlpipe(mydev->udev, 0),
0x12,
0x48,
(80 * 0x100) + 10, /* (power mode) */
(0x00 * 0x100) + (mydev->powered ? 1 : 0),
NULL,
0,
2000);
if (rc < 0)
dev_dbg(&mydev->udev->dev, "power retval = %d\n", rc);
if (!mydev->powered && mydev->has_interface_pm) {
usb_autopm_put_interface(mydev->intf);
mydev->has_interface_pm = 0;
}
}
static void update_display_mode(struct usb_sevsegdev *mydev)
{
int rc;
if (mydev->shadow_power != 1)
return;
rc = usb_control_msg(mydev->udev,
usb_sndctrlpipe(mydev->udev, 0),
0x12,
0x48,
(82 * 0x100) + 10, /* (set mode) */
(mydev->mode_msb * 0x100) + mydev->mode_lsb,
NULL,
0,
2000);
if (rc < 0)
dev_dbg(&mydev->udev->dev, "mode retval = %d\n", rc);
}
static void update_display_visual(struct usb_sevsegdev *mydev, gfp_t mf)
{
int rc;
int i;
unsigned char *buffer;
u8 decimals = 0;
if (mydev->shadow_power != 1)
return;
buffer = kzalloc(MAXLEN, mf);
if (!buffer) {
dev_err(&mydev->udev->dev, "out of memory\n");
return;
}
/* The device is right to left, whereas you write left to right */
for (i = 0; i < mydev->textlength; i++)
buffer[i] = mydev->text[mydev->textlength-1-i];
rc = usb_control_msg(mydev->udev,
usb_sndctrlpipe(mydev->udev, 0),
0x12,
0x48,
(85 * 0x100) + 10, /* (write text) */
(0 * 0x100) + mydev->textmode, /* mode */
buffer,
mydev->textlength,
2000);
if (rc < 0)
dev_dbg(&mydev->udev->dev, "write retval = %d\n", rc);
kfree(buffer);
/* The device is right to left, whereas you write left to right */
for (i = 0; i < sizeof(mydev->decimals); i++)
decimals |= mydev->decimals[i] << i;
rc = usb_control_msg(mydev->udev,
usb_sndctrlpipe(mydev->udev, 0),
0x12,
0x48,
(86 * 0x100) + 10, /* (set decimal) */
(0 * 0x100) + decimals, /* decimals */
NULL,
0,
2000);
if (rc < 0)
dev_dbg(&mydev->udev->dev, "decimal retval = %d\n", rc);
}
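/*
* Worked example of the decimal packing above: decimals[] = {0, 0, 1, 0,
* 0, 0} (third digit from the right lit) folds into the bitmap 1 << 2 =
* 0x04, which travels in the low byte of the wIndex word of the
* "set decimal" request.
*/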
#define MYDEV_ATTR_SIMPLE_UNSIGNED(name, update_fcn) \
static ssize_t show_attr_##name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
struct usb_sevsegdev *mydev = usb_get_intfdata(intf); \
\
return sprintf(buf, "%u\n", mydev->name); \
} \
\
static ssize_t set_attr_##name(struct device *dev, \
struct device_attribute *attr, const char *buf, size_t count) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
struct usb_sevsegdev *mydev = usb_get_intfdata(intf); \
\
mydev->name = simple_strtoul(buf, NULL, 10); \
update_fcn(mydev); \
\
return count; \
} \
static DEVICE_ATTR(name, S_IWUGO | S_IRUGO, show_attr_##name, set_attr_##name);
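/*
* For example, MYDEV_ATTR_SIMPLE_UNSIGNED(powered, update_display_powered)
* below expands to show_attr_powered()/set_attr_powered() plus
* dev_attr_powered, so writing "1" to the 'powered' sysfs file stores 1 in
* mydev->powered and triggers update_display_powered().
*/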
static ssize_t show_attr_text(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
/* size the output on the sysfs page, not on textlength, so the full
* text plus newline survive; text[] is not NUL-terminated */
return scnprintf(buf, PAGE_SIZE, "%.*s\n", mydev->textlength, mydev->text);
}
static ssize_t set_attr_text(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
size_t end = my_memlen(buf, count);
if (end > sizeof(mydev->text))
return -EINVAL;
memset(mydev->text, 0, sizeof(mydev->text));
mydev->textlength = end;
if (end > 0)
memcpy(mydev->text, buf, end);
update_display_visual(mydev, GFP_KERNEL);
return count;
}
static DEVICE_ATTR(text, S_IWUGO | S_IRUGO, show_attr_text, set_attr_text);
static ssize_t show_attr_decimals(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
int i;
int pos;
for (i = 0; i < sizeof(mydev->decimals); i++) {
pos = sizeof(mydev->decimals) - 1 - i;
if (mydev->decimals[i] == 0)
buf[pos] = '0';
else if (mydev->decimals[i] == 1)
buf[pos] = '1';
else
buf[pos] = 'x';
}
buf[sizeof(mydev->decimals)] = '\n';
return sizeof(mydev->decimals) + 1;
}
static ssize_t set_attr_decimals(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
size_t end = my_memlen(buf, count);
int i;
if (end > sizeof(mydev->decimals))
return -EINVAL;
for (i = 0; i < end; i++)
if (buf[i] != '0' && buf[i] != '1')
return -EINVAL;
memset(mydev->decimals, 0, sizeof(mydev->decimals));
for (i = 0; i < end; i++)
if (buf[i] == '1')
mydev->decimals[end-1-i] = 1;
update_display_visual(mydev, GFP_KERNEL);
return count;
}
static DEVICE_ATTR(decimals, S_IWUGO | S_IRUGO,
show_attr_decimals, set_attr_decimals);
static ssize_t show_attr_textmode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
int i;
buf[0] = 0;
for (i = 0; display_textmodes[i]; i++) {
if (mydev->textmode == i) {
strcat(buf, " [");
strcat(buf, display_textmodes[i]);
strcat(buf, "] ");
} else {
strcat(buf, " ");
strcat(buf, display_textmodes[i]);
strcat(buf, " ");
}
}
strcat(buf, "\n");
return strlen(buf);
}
static ssize_t set_attr_textmode(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
int i;
for (i = 0; display_textmodes[i]; i++) {
if (sysfs_streq(display_textmodes[i], buf)) {
mydev->textmode = i;
update_display_visual(mydev, GFP_KERNEL);
return count;
}
}
return -EINVAL;
}
static DEVICE_ATTR(textmode, S_IWUGO | S_IRUGO,
show_attr_textmode, set_attr_textmode);
MYDEV_ATTR_SIMPLE_UNSIGNED(powered, update_display_powered);
MYDEV_ATTR_SIMPLE_UNSIGNED(mode_msb, update_display_mode);
MYDEV_ATTR_SIMPLE_UNSIGNED(mode_lsb, update_display_mode);
static struct attribute *dev_attrs[] = {
&dev_attr_powered.attr,
&dev_attr_text.attr,
&dev_attr_textmode.attr,
&dev_attr_decimals.attr,
&dev_attr_mode_msb.attr,
&dev_attr_mode_lsb.attr,
NULL
};
static struct attribute_group dev_attr_grp = {
.attrs = dev_attrs,
};
static int sevseg_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct usb_sevsegdev *mydev = NULL;
int rc = -ENOMEM;
mydev = kzalloc(sizeof(struct usb_sevsegdev), GFP_KERNEL);
if (mydev == NULL) {
dev_err(&interface->dev, "Out of memory\n");
goto error_mem;
}
mydev->udev = usb_get_dev(udev);
mydev->intf = interface;
usb_set_intfdata(interface, mydev);
/* PM */
mydev->shadow_power = 1; /* currently active */
mydev->has_interface_pm = 0; /* have not issued autopm_get */
/* set defaults */
mydev->textmode = 0x02; /* ascii mode */
mydev->mode_msb = 0x06; /* 6 characters */
mydev->mode_lsb = 0x3f; /* scanmode for 6 chars */
rc = sysfs_create_group(&interface->dev.kobj, &dev_attr_grp);
if (rc)
goto error;
dev_info(&interface->dev, "USB 7 Segment device now attached\n");
return 0;
error:
usb_set_intfdata(interface, NULL);
usb_put_dev(mydev->udev);
kfree(mydev);
error_mem:
return rc;
}
static void sevseg_disconnect(struct usb_interface *interface)
{
struct usb_sevsegdev *mydev;
mydev = usb_get_intfdata(interface);
sysfs_remove_group(&interface->dev.kobj, &dev_attr_grp);
usb_set_intfdata(interface, NULL);
usb_put_dev(mydev->udev);
kfree(mydev);
dev_info(&interface->dev, "USB 7 Segment now disconnected\n");
}
static int sevseg_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usb_sevsegdev *mydev;
mydev = usb_get_intfdata(intf);
mydev->shadow_power = 0;
return 0;
}
static int sevseg_resume(struct usb_interface *intf)
{
struct usb_sevsegdev *mydev;
mydev = usb_get_intfdata(intf);
mydev->shadow_power = 1;
update_display_mode(mydev);
update_display_visual(mydev, GFP_NOIO);
return 0;
}
static int sevseg_reset_resume(struct usb_interface *intf)
{
struct usb_sevsegdev *mydev;
mydev = usb_get_intfdata(intf);
mydev->shadow_power = 1;
update_display_mode(mydev);
update_display_visual(mydev, GFP_NOIO);
return 0;
}
static struct usb_driver sevseg_driver = {
.name = "usbsevseg",
.probe = sevseg_probe,
.disconnect = sevseg_disconnect,
.suspend = sevseg_suspend,
.resume = sevseg_resume,
.reset_resume = sevseg_reset_resume,
.id_table = id_table,
.supports_autosuspend = 1,
};
static int __init usb_sevseg_init(void)
{
int rc = 0;
rc = usb_register(&sevseg_driver);
if (rc)
err("usb_register failed. Error number %d", rc);
return rc;
}
static void __exit usb_sevseg_exit(void)
{
usb_deregister(&sevseg_driver);
}
module_init(usb_sevseg_init);
module_exit(usb_sevseg_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| gpl-2.0 |
bigzz/eas-backports | drivers/gpu/drm/radeon/radeon_atpx_handler.c | 1153 | 14530 | /*
* Copyright (c) 2010 Red Hat Inc.
* Author : Dave Airlie <airlied@redhat.com>
*
* Licensed under GPLv2
*
* ATPX support for both Intel/ATI
*/
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
#include <linux/pci.h>
#include "radeon_acpi.h"
struct radeon_atpx_functions {
bool px_params;
bool power_cntl;
bool disp_mux_cntl;
bool i2c_mux_cntl;
bool switch_start;
bool switch_end;
bool disp_connectors_mapping;
bool disp_detection_ports;
};
struct radeon_atpx {
acpi_handle handle;
struct radeon_atpx_functions functions;
};
static struct radeon_atpx_priv {
bool atpx_detected;
/* handle for device - and atpx */
acpi_handle dhandle;
struct radeon_atpx atpx;
} radeon_atpx_priv;
struct atpx_verify_interface {
u16 size; /* structure size in bytes (includes size field) */
u16 version; /* version */
u32 function_bits; /* supported functions bit vector */
} __packed;
struct atpx_px_params {
u16 size; /* structure size in bytes (includes size field) */
u32 valid_flags; /* which flags are valid */
u32 flags; /* flags */
} __packed;
struct atpx_power_control {
u16 size;
u8 dgpu_state;
} __packed;
struct atpx_mux {
u16 size;
u16 mux;
} __packed;
/**
* radeon_atpx_call - call an ATPX method
*
* @handle: acpi handle
* @function: the ATPX function to execute
* @params: ATPX function params
*
* Executes the requested ATPX function (all asics).
* Returns a pointer to the acpi output buffer.
*/
static union acpi_object *radeon_atpx_call(acpi_handle handle, int function,
struct acpi_buffer *params)
{
acpi_status status;
union acpi_object atpx_arg_elements[2];
struct acpi_object_list atpx_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
atpx_arg.count = 2;
atpx_arg.pointer = &atpx_arg_elements[0];
atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
atpx_arg_elements[0].integer.value = function;
if (params) {
atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
atpx_arg_elements[1].buffer.length = params->length;
atpx_arg_elements[1].buffer.pointer = params->pointer;
} else {
/* We need a second fake parameter */
atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
atpx_arg_elements[1].integer.value = 0;
}
status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
/* Fail only if calling the method fails and ATPX is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
printk("failed to evaluate ATPX got %s\n",
acpi_format_exception(status));
kfree(buffer.pointer);
return NULL;
}
return buffer.pointer;
}
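/*
* Example invocation of the helper above (a sketch mirroring the callers
* below): executing the parameterless VERIFY_INTERFACE method and freeing
* the returned ACPI buffer when done:
*
*	info = radeon_atpx_call(handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL);
*	if (!info)
*		return -EIO;
*	... parse info->buffer.pointer ...
*	kfree(info);
*/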
/**
* radeon_atpx_parse_functions - parse supported functions
*
* @f: supported functions struct
* @mask: supported functions mask from ATPX
*
* Use the supported functions mask from ATPX function
* ATPX_FUNCTION_VERIFY_INTERFACE to determine what functions
* are supported (all asics).
*/
static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mask)
{
f->px_params = mask & ATPX_GET_PX_PARAMETERS_SUPPORTED;
f->power_cntl = mask & ATPX_POWER_CONTROL_SUPPORTED;
f->disp_mux_cntl = mask & ATPX_DISPLAY_MUX_CONTROL_SUPPORTED;
f->i2c_mux_cntl = mask & ATPX_I2C_MUX_CONTROL_SUPPORTED;
f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
f->disp_detection_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
}
/**
* radeon_atpx_validate - validate ATPX functions
*
* @atpx: radeon atpx struct
*
* Validate that required functions are enabled (all asics).
* returns 0 on success, error on failure.
*/
static int radeon_atpx_validate(struct radeon_atpx *atpx)
{
/* make sure required functions are enabled */
/* dGPU power control is required */
atpx->functions.power_cntl = true;
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
size_t size;
u32 valid_bits;
info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
if (!info)
return -EIO;
memset(&output, 0, sizeof(output));
size = *(u16 *) info->buffer.pointer;
if (size < 10) {
printk("ATPX buffer is too small: %zu\n", size);
kfree(info);
return -EINVAL;
}
size = min(sizeof(output), size);
memcpy(&output, info->buffer.pointer, size);
valid_bits = output.flags & output.valid_flags;
/* if separate mux flag is set, mux controls are required */
if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
atpx->functions.i2c_mux_cntl = true;
atpx->functions.disp_mux_cntl = true;
}
/* if any outputs are muxed, mux controls are required */
if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
ATPX_TV_SIGNAL_MUXED |
ATPX_DFP_SIGNAL_MUXED))
atpx->functions.disp_mux_cntl = true;
kfree(info);
}
return 0;
}
/**
* radeon_atpx_verify_interface - verify ATPX
*
* @atpx: radeon atpx struct
*
* Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function
* to initialize ATPX and determine what features are supported
* (all asics).
* returns 0 on success, error on failure.
*/
static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
{
union acpi_object *info;
struct atpx_verify_interface output;
size_t size;
int err = 0;
info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL);
if (!info)
return -EIO;
memset(&output, 0, sizeof(output));
size = *(u16 *) info->buffer.pointer;
if (size < 8) {
printk("ATPX buffer is too small: %zu\n", size);
err = -EINVAL;
goto out;
}
size = min(sizeof(output), size);
memcpy(&output, info->buffer.pointer, size);
/* TODO: check version? */
printk("ATPX version %u, functions 0x%08x\n",
output.version, output.function_bits);
radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
out:
kfree(info);
return err;
}
/**
* radeon_atpx_set_discrete_state - power up/down discrete GPU
*
* @atpx: atpx info struct
* @state: discrete GPU state (0 = power down, 1 = power up)
*
* Execute the ATPX_FUNCTION_POWER_CONTROL ATPX function to
* power down/up the discrete GPU (all asics).
* Returns 0 on success, error on failure.
*/
static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
{
struct acpi_buffer params;
union acpi_object *info;
struct atpx_power_control input;
if (atpx->functions.power_cntl) {
input.size = 3;
input.dgpu_state = state;
params.length = input.size;
params.pointer = &input;
info = radeon_atpx_call(atpx->handle,
ATPX_FUNCTION_POWER_CONTROL,
¶ms);
if (!info)
return -EIO;
kfree(info);
}
return 0;
}
/**
* radeon_atpx_switch_disp_mux - switch display mux
*
* @atpx: atpx info struct
* @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
*
* Execute the ATPX_FUNCTION_DISPLAY_MUX_CONTROL ATPX function to
* switch the display mux between the discrete GPU and integrated GPU
* (all asics).
* Returns 0 on success, error on failure.
*/
static int radeon_atpx_switch_disp_mux(struct radeon_atpx *atpx, u16 mux_id)
{
struct acpi_buffer params;
union acpi_object *info;
struct atpx_mux input;
if (atpx->functions.disp_mux_cntl) {
input.size = 4;
input.mux = mux_id;
params.length = input.size;
params.pointer = &input;
info = radeon_atpx_call(atpx->handle,
ATPX_FUNCTION_DISPLAY_MUX_CONTROL,
¶ms);
if (!info)
return -EIO;
kfree(info);
}
return 0;
}
/**
* radeon_atpx_switch_i2c_mux - switch i2c/hpd mux
*
* @atpx: atpx info struct
* @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
*
* Execute the ATPX_FUNCTION_I2C_MUX_CONTROL ATPX function to
* switch the i2c/hpd mux between the discrete GPU and integrated GPU
* (all asics).
* Returns 0 on success, error on failure.
*/
static int radeon_atpx_switch_i2c_mux(struct radeon_atpx *atpx, u16 mux_id)
{
struct acpi_buffer params;
union acpi_object *info;
struct atpx_mux input;
if (atpx->functions.i2c_mux_cntl) {
input.size = 4;
input.mux = mux_id;
params.length = input.size;
params.pointer = &input;
info = radeon_atpx_call(atpx->handle,
ATPX_FUNCTION_I2C_MUX_CONTROL,
¶ms);
if (!info)
return -EIO;
kfree(info);
}
return 0;
}
/**
* radeon_atpx_switch_start - notify the sbios of a GPU switch
*
* @atpx: atpx info struct
* @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
*
* Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION ATPX
* function to notify the sbios that a switch between the discrete GPU and
* integrated GPU has begun (all asics).
* Returns 0 on success, error on failure.
*/
static int radeon_atpx_switch_start(struct radeon_atpx *atpx, u16 mux_id)
{
struct acpi_buffer params;
union acpi_object *info;
struct atpx_mux input;
if (atpx->functions.switch_start) {
input.size = 4;
input.mux = mux_id;
params.length = input.size;
params.pointer = &input;
info = radeon_atpx_call(atpx->handle,
ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION,
¶ms);
if (!info)
return -EIO;
kfree(info);
}
return 0;
}
/**
* radeon_atpx_switch_end - notify the sbios of a GPU switch
*
* @atpx: atpx info struct
* @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
*
* Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION ATPX
* function to notify the sbios that a switch between the discrete GPU and
* integrated GPU has ended (all asics).
* Returns 0 on success, error on failure.
*/
static int radeon_atpx_switch_end(struct radeon_atpx *atpx, u16 mux_id)
{
struct acpi_buffer params;
union acpi_object *info;
struct atpx_mux input;
if (atpx->functions.switch_end) {
input.size = 4;
input.mux = mux_id;
params.length = input.size;
params.pointer = &input;
info = radeon_atpx_call(atpx->handle,
ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION,
¶ms);
if (!info)
return -EIO;
kfree(info);
}
return 0;
}
/**
* radeon_atpx_switchto - switch to the requested GPU
*
* @id: GPU to switch to
*
* Execute the necessary ATPX functions to switch between the discrete GPU and
* integrated GPU (all asics).
* Returns 0 on success, error on failure.
*/
static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
{
u16 gpu_id;
if (id == VGA_SWITCHEROO_IGD)
gpu_id = ATPX_INTEGRATED_GPU;
else
gpu_id = ATPX_DISCRETE_GPU;
radeon_atpx_switch_start(&radeon_atpx_priv.atpx, gpu_id);
radeon_atpx_switch_disp_mux(&radeon_atpx_priv.atpx, gpu_id);
radeon_atpx_switch_i2c_mux(&radeon_atpx_priv.atpx, gpu_id);
radeon_atpx_switch_end(&radeon_atpx_priv.atpx, gpu_id);
return 0;
}
/**
* radeon_atpx_power_state - power down/up the requested GPU
*
* @id: GPU to power down/up
* @state: requested power state (0 = off, 1 = on)
*
* Execute the necessary ATPX function to power down/up the discrete GPU
* (all asics).
* Returns 0 on success, error on failure.
*/
static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
enum vga_switcheroo_state state)
{
/* on w500 ACPI can't change intel gpu state */
if (id == VGA_SWITCHEROO_IGD)
return 0;
radeon_atpx_set_discrete_state(&radeon_atpx_priv.atpx, state);
return 0;
}
/**
* radeon_atpx_pci_probe_handle - look up the ATPX handle
*
* @pdev: pci device
*
* Look up the ATPX handles (all asics).
* Returns true if the handles are found, false if not.
*/
static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
{
acpi_handle dhandle, atpx_handle;
acpi_status status;
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
if (ACPI_FAILURE(status))
return false;
radeon_atpx_priv.dhandle = dhandle;
radeon_atpx_priv.atpx.handle = atpx_handle;
return true;
}
/**
* radeon_atpx_init - verify the ATPX interface
*
* Verify the ATPX interface (all asics).
* Returns 0 on success, error on failure.
*/
static int radeon_atpx_init(void)
{
int r;
/* set up the ATPX handle */
r = radeon_atpx_verify_interface(&radeon_atpx_priv.atpx);
if (r)
return r;
/* validate the atpx setup */
r = radeon_atpx_validate(&radeon_atpx_priv.atpx);
if (r)
return r;
return 0;
}
/**
* radeon_atpx_get_client_id - get the client id
*
* @pdev: pci device
*
* look up whether we are the integrated or discrete GPU (all asics).
* Returns the client id.
*/
static int radeon_atpx_get_client_id(struct pci_dev *pdev)
{
if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
return VGA_SWITCHEROO_IGD;
else
return VGA_SWITCHEROO_DIS;
}
static struct vga_switcheroo_handler radeon_atpx_handler = {
.switchto = radeon_atpx_switchto,
.power_state = radeon_atpx_power_state,
.init = radeon_atpx_init,
.get_client_id = radeon_atpx_get_client_id,
};
/**
* radeon_atpx_detect - detect whether we have PX
*
* Check if we have a PX system (all asics).
* Returns true if we have a PX system, false if not.
*/
static bool radeon_atpx_detect(void)
{
char acpi_method_name[255] = { 0 };
struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
struct pci_dev *pdev = NULL;
bool has_atpx = false;
int vga_count = 0;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
has_atpx |= radeon_atpx_pci_probe_handle(pdev);
}
/* some newer PX laptops mark the dGPU as a non-VGA display device */
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
vga_count++;
has_atpx |= radeon_atpx_pci_probe_handle(pdev);
}
if (has_atpx && vga_count == 2) {
acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
acpi_method_name);
radeon_atpx_priv.atpx_detected = true;
return true;
}
return false;
}
/**
* radeon_register_atpx_handler - register with vga_switcheroo
*
* Register the PX callbacks with vga_switcheroo (all asics).
*/
void radeon_register_atpx_handler(void)
{
bool r;
/* detect if we have any ATPX + 2 VGA in the system */
r = radeon_atpx_detect();
if (!r)
return;
vga_switcheroo_register_handler(&radeon_atpx_handler);
}
/**
* radeon_unregister_atpx_handler - unregister with vga_switcheroo
*
* Unregister the PX callbacks with vga_switcheroo (all asics).
*/
void radeon_unregister_atpx_handler(void)
{
vga_switcheroo_unregister_handler();
}
| gpl-2.0 |
EPDCenterSpain/kernel_Archos_97b_Titan | arch/powerpc/kernel/perf_event_fsl_emb.c | 2433 | 14598 | /*
* Performance event support - Freescale Embedded Performance Monitor
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
* Copyright 2010 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg_fsl_emb.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
struct cpu_hw_events {
int n_events;
int disabled;
u8 pmcs_enabled;
struct perf_event *event[MAX_HWEVENTS];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
static struct fsl_emb_pmu *ppmu;
/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
* If interrupts were soft-disabled when a PMU interrupt occurs, treat
* it as an NMI.
*/
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
#ifdef __powerpc64__
return !regs->softe;
#else
return 0;
#endif
}
static void perf_event_interrupt(struct pt_regs *regs);
/*
* Read one performance monitor counter (PMC).
*/
static unsigned long read_pmc(int idx)
{
unsigned long val;
switch (idx) {
case 0:
val = mfpmr(PMRN_PMC0);
break;
case 1:
val = mfpmr(PMRN_PMC1);
break;
case 2:
val = mfpmr(PMRN_PMC2);
break;
case 3:
val = mfpmr(PMRN_PMC3);
break;
default:
printk(KERN_ERR "oops trying to read PMC%d\n", idx);
val = 0;
}
return val;
}
/*
* Write one PMC.
*/
static void write_pmc(int idx, unsigned long val)
{
switch (idx) {
case 0:
mtpmr(PMRN_PMC0, val);
break;
case 1:
mtpmr(PMRN_PMC1, val);
break;
case 2:
mtpmr(PMRN_PMC2, val);
break;
case 3:
mtpmr(PMRN_PMC3, val);
break;
default:
printk(KERN_ERR "oops trying to write PMC%d\n", idx);
}
isync();
}
/*
* Write one local control A register
*/
static void write_pmlca(int idx, unsigned long val)
{
switch (idx) {
case 0:
mtpmr(PMRN_PMLCA0, val);
break;
case 1:
mtpmr(PMRN_PMLCA1, val);
break;
case 2:
mtpmr(PMRN_PMLCA2, val);
break;
case 3:
mtpmr(PMRN_PMLCA3, val);
break;
default:
printk(KERN_ERR "oops trying to write PMLCA%d\n", idx);
}
isync();
}
/*
* Write one local control B register
*/
static void write_pmlcb(int idx, unsigned long val)
{
switch (idx) {
case 0:
mtpmr(PMRN_PMLCB0, val);
break;
case 1:
mtpmr(PMRN_PMLCB1, val);
break;
case 2:
mtpmr(PMRN_PMLCB2, val);
break;
case 3:
mtpmr(PMRN_PMLCB3, val);
break;
default:
printk(KERN_ERR "oops trying to write PMLCB%d\n", idx);
}
isync();
}
static void fsl_emb_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
if (event->hw.state & PERF_HES_STOPPED)
return;
/*
* Performance monitor interrupts come even when interrupts
* are soft-disabled, as long as interrupts are hard-enabled.
* Therefore we treat them like NMIs.
*/
do {
prev = local64_read(&event->hw.prev_count);
barrier();
val = read_pmc(event->hw.idx);
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
/* The counters are only 32 bits wide */
delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
local64_sub(delta, &event->hw.period_left);
}
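/*
* Worked example of the wrap-safe delta above: with prev = 0xfffffff0 and
* a new raw value of 0x00000010, (val - prev) & 0xfffffffful yields 0x20,
* i.e. 32 events counted, even though the 32-bit counter wrapped between
* the two reads.
*/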
/*
* Disable all events to prevent PMU interrupts and to allow
* events to be added or removed.
*/
static void fsl_emb_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
cpuhw->disabled = 1;
/*
* Check if we ever enabled the PMU on this cpu.
*/
if (!cpuhw->pmcs_enabled) {
ppc_enable_pmcs();
cpuhw->pmcs_enabled = 1;
}
if (atomic_read(&num_events)) {
/*
* Set the 'freeze all counters' bit, and disable
* interrupts. The barrier is to make sure the
* mtpmr has been executed and the PMU has frozen
* the events before we return.
*/
mtpmr(PMRN_PMGC0, PMGC0_FAC);
isync();
}
}
local_irq_restore(flags);
}
/*
* Re-enable all events if disable == 0.
* If we were previously disabled and events were added, then
* put the new config on the PMU.
*/
static void fsl_emb_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled)
goto out;
cpuhw->disabled = 0;
ppc_set_pmu_inuse(cpuhw->n_events != 0);
if (cpuhw->n_events > 0) {
mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
isync();
}
out:
local_irq_restore(flags);
}
static int collect_events(struct perf_event *group, int max_count,
struct perf_event *ctrs[])
{
int n = 0;
struct perf_event *event;
if (!is_software_event(group)) {
if (n >= max_count)
return -1;
ctrs[n] = group;
n++;
}
list_for_each_entry(event, &group->sibling_list, group_entry) {
if (!is_software_event(event) &&
event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
ctrs[n] = event;
n++;
}
}
return n;
}
/* context locked on entry */
static int fsl_emb_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuhw;
int ret = -EAGAIN;
int num_counters = ppmu->n_counter;
u64 val;
int i;
perf_pmu_disable(event->pmu);
cpuhw = &get_cpu_var(cpu_hw_events);
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
num_counters = ppmu->n_restricted;
/*
* Allocate counters from top-down, so that restricted-capable
* counters are kept free as long as possible.
*/
for (i = num_counters - 1; i >= 0; i--) {
if (cpuhw->event[i])
continue;
break;
}
if (i < 0)
goto out;
event->hw.idx = i;
cpuhw->event[i] = event;
++cpuhw->n_events;
val = 0;
if (event->hw.sample_period) {
s64 left = local64_read(&event->hw.period_left);
if (left < 0x80000000L)
val = 0x80000000L - left;
}
local64_set(&event->hw.prev_count, val);
if (!(flags & PERF_EF_START)) {
event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
val = 0;
}
write_pmc(i, val);
perf_event_update_userpage(event);
write_pmlcb(i, event->hw.config >> 32);
write_pmlca(i, event->hw.config_base);
ret = 0;
out:
put_cpu_var(cpu_hw_events);
perf_pmu_enable(event->pmu);
return ret;
}
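/*
* Note on the "0x80000000L - left" bias in fsl_emb_pmu_add() above: the
* PMU raises its interrupt when a counter's msb becomes set, so for a
* remaining period of, say, 100000 events the counter is preloaded with
* 0x80000000 - 100000 and overflows after exactly 100000 increments.
* The figure is illustrative.
*/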
/* context locked on entry */
static void fsl_emb_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuhw;
int i = event->hw.idx;
perf_pmu_disable(event->pmu);
if (i < 0)
goto out;
fsl_emb_pmu_read(event);
cpuhw = &get_cpu_var(cpu_hw_events);
WARN_ON(event != cpuhw->event[event->hw.idx]);
write_pmlca(i, 0);
write_pmlcb(i, 0);
write_pmc(i, 0);
cpuhw->event[i] = NULL;
event->hw.idx = -1;
/*
* TODO: if at least one restricted event exists, and we
* just freed up a non-restricted-capable counter, and
* there is a restricted-capable counter occupied by
* a non-restricted event, migrate that event to the
* vacated counter.
*/
cpuhw->n_events--;
out:
perf_pmu_enable(event->pmu);
put_cpu_var(cpu_hw_events);
}
static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
{
unsigned long flags;
s64 left;
if (event->hw.idx < 0 || !event->hw.sample_period)
return;
if (!(event->hw.state & PERF_HES_STOPPED))
return;
if (ef_flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
local_irq_save(flags);
perf_pmu_disable(event->pmu);
event->hw.state = 0;
left = local64_read(&event->hw.period_left);
write_pmc(event->hw.idx, left);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
{
unsigned long flags;
if (event->hw.idx < 0 || !event->hw.sample_period)
return;
if (event->hw.state & PERF_HES_STOPPED)
return;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
fsl_emb_pmu_read(event);
event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
write_pmc(event->hw.idx, 0);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
/*
* Release the PMU if this is the last perf_event.
*/
static void hw_perf_event_destroy(struct perf_event *event)
{
if (!atomic_add_unless(&num_events, -1, 1)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_dec_return(&num_events) == 0)
release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
/*
* Translate a generic cache event_id config to a raw event_id code.
*/
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
unsigned long type, op, result;
int ev;
if (!ppmu->cache_events)
return -EINVAL;
/* unpack config */
type = config & 0xff;
op = (config >> 8) & 0xff;
result = (config >> 16) & 0xff;
if (type >= PERF_COUNT_HW_CACHE_MAX ||
op >= PERF_COUNT_HW_CACHE_OP_MAX ||
result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ev = (*ppmu->cache_events)[type][op][result];
if (ev == 0)
return -EOPNOTSUPP;
if (ev == -1)
return -EINVAL;
*eventp = ev;
return 0;
}
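/*
* Example of the config packing decoded above, using the generic perf
* cache-event encoding: an L1-data read miss is expressed as
*
*	config = PERF_COUNT_HW_CACHE_L1D |
*		 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
*		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
*
* which unpacks here to type 0, op 0, result 1.
*/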
static int fsl_emb_pmu_event_init(struct perf_event *event)
{
u64 ev;
struct perf_event *events[MAX_HWEVENTS];
int n;
int err;
int num_restricted;
int i;
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
return -EOPNOTSUPP;
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
return err;
break;
case PERF_TYPE_RAW:
ev = event->attr.config;
break;
default:
return -ENOENT;
}
event->hw.config = ppmu->xlate_event(ev);
if (!(event->hw.config & FSL_EMB_EVENT_VALID))
return -EINVAL;
/*
* If this is in a group, check if it can go on with all the
* other hardware events in the group. We assume the event
* hasn't been linked into its leader's sibling list at this point.
*/
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader,
ppmu->n_counter - 1, events);
if (n < 0)
return -EINVAL;
}
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
num_restricted = 0;
for (i = 0; i < n; i++) {
if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED)
num_restricted++;
}
if (num_restricted >= ppmu->n_restricted)
return -EINVAL;
}
event->hw.idx = -1;
event->hw.config_base = PMLCA_CE | PMLCA_FCM1 |
(u32)((ev << 16) & PMLCA_EVENT_MASK);
if (event->attr.exclude_user)
event->hw.config_base |= PMLCA_FCU;
if (event->attr.exclude_kernel)
event->hw.config_base |= PMLCA_FCS;
if (event->attr.exclude_idle)
return -ENOTSUPP;
event->hw.last_period = event->hw.sample_period;
local64_set(&event->hw.period_left, event->hw.last_period);
/*
* See if we need to reserve the PMU.
* If no events are currently in use, then we have to take a
* mutex to ensure that we don't race with another task doing
* reserve_pmc_hardware or release_pmc_hardware.
*/
err = 0;
if (!atomic_inc_not_zero(&num_events)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&num_events) == 0 &&
reserve_pmc_hardware(perf_event_interrupt))
err = -EBUSY;
else
atomic_inc(&num_events);
mutex_unlock(&pmc_reserve_mutex);
mtpmr(PMRN_PMGC0, PMGC0_FAC);
isync();
}
event->destroy = hw_perf_event_destroy;
return err;
}
static struct pmu fsl_emb_pmu = {
.pmu_enable = fsl_emb_pmu_enable,
.pmu_disable = fsl_emb_pmu_disable,
.event_init = fsl_emb_pmu_event_init,
.add = fsl_emb_pmu_add,
.del = fsl_emb_pmu_del,
.start = fsl_emb_pmu_start,
.stop = fsl_emb_pmu_stop,
.read = fsl_emb_pmu_read,
};
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
* here so there is no possibility of being interrupted.
*/
static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs, int nmi)
{
u64 period = event->hw.sample_period;
s64 prev, delta, left;
int record = 0;
if (event->hw.state & PERF_HES_STOPPED) {
write_pmc(event->hw.idx, 0);
return;
}
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
/*
* See if the total period for this event has expired,
* and update for the next period.
*/
val = 0;
left = local64_read(&event->hw.period_left) - delta;
if (period) {
if (left <= 0) {
left += period;
if (left <= 0)
left = period;
record = 1;
event->hw.last_period = event->hw.sample_period;
}
if (left < 0x80000000LL)
val = 0x80000000LL - left;
}
write_pmc(event->hw.idx, val);
local64_set(&event->hw.prev_count, val);
local64_set(&event->hw.period_left, left);
perf_event_update_userpage(event);
/*
* Finally record data if requested.
*/
if (record) {
struct perf_sample_data data;
perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;
if (perf_event_overflow(event, nmi, &data, regs))
fsl_emb_pmu_stop(event, 0);
}
}
static void perf_event_interrupt(struct pt_regs *regs)
{
int i;
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
unsigned long val;
int found = 0;
int nmi;
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
else
irq_enter();
for (i = 0; i < ppmu->n_counter; ++i) {
event = cpuhw->event[i];
val = read_pmc(i);
if ((int)val < 0) {
if (event) {
/* event has overflowed */
found = 1;
record_and_restart(event, val, regs, nmi);
} else {
/*
* Disabled counter is negative,
* reset it just in case.
*/
write_pmc(i, 0);
}
}
}
/* PMM will keep counters frozen until we return from the interrupt. */
mtmsr(mfmsr() | MSR_PMM);
mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
isync();
if (nmi)
nmi_exit();
else
irq_exit();
}
void hw_perf_event_setup(int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
memset(cpuhw, 0, sizeof(*cpuhw));
}
int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
{
if (ppmu)
return -EBUSY; /* something's already registered */
ppmu = pmu;
pr_info("%s performance monitor hardware support registered\n",
pmu->name);
perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW);
return 0;
}
| gpl-2.0 |
visi0nary/mediatek | mt6732/kernel/drivers/media/pci/cx23885/altera-ci.c | 2945 | 20984 | /*
* altera-ci.c
*
* CI driver in conjunction with NetUp Dual DVB-T/C RF CI card
*
* Copyright (C) 2010,2011 NetUP Inc.
* Copyright (C) 2010,2011 Igor M. Liplianin <liplianin@netup.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* currently cx23885 GPIO's used.
* GPIO-0 ~INT in
* GPIO-1 TMS out
* GPIO-2 ~reset chips out
* GPIO-3 to GPIO-10 data/addr for CA in/out
* GPIO-11 ~CS out
* GPIO-12 AD_RG out
* GPIO-13 ~WR out
* GPIO-14 ~RD out
* GPIO-15 ~RDY in
* GPIO-16 TCK out
* GPIO-17 TDO in
* GPIO-18 TDI out
*/
/*
* Bit definitions for MC417_RWD and MC417_OEN registers
* bits 31-16
* +-----------+
* | Reserved |
* +-----------+
* bit 15 bit 14 bit 13 bit 12 bit 11 bit 10 bit 9 bit 8
* +-------+-------+-------+-------+-------+-------+-------+-------+
* | TDI | TDO | TCK | RDY# | #RD | #WR | AD_RG | #CS |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* bit 7 bit 6 bit 5 bit 4 bit 3 bit 2 bit 1 bit 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* | DATA7| DATA6| DATA5| DATA4| DATA3| DATA2| DATA1| DATA0|
* +-------+-------+-------+-------+-------+-------+-------+-------+
*/
#include <media/videobuf-dma-sg.h>
#include <media/videobuf-dvb.h>
#include "altera-ci.h"
#include "dvb_ca_en50221.h"
/* FPGA regs */
#define NETUP_CI_INT_CTRL 0x00
#define NETUP_CI_BUSCTRL2 0x01
#define NETUP_CI_ADDR0 0x04
#define NETUP_CI_ADDR1 0x05
#define NETUP_CI_DATA 0x06
#define NETUP_CI_BUSCTRL 0x07
#define NETUP_CI_PID_ADDR0 0x08
#define NETUP_CI_PID_ADDR1 0x09
#define NETUP_CI_PID_DATA 0x0a
#define NETUP_CI_TSA_DIV 0x0c
#define NETUP_CI_TSB_DIV 0x0d
#define NETUP_CI_REVISION 0x0f
/* const for ci op */
#define NETUP_CI_FLG_CTL 1
#define NETUP_CI_FLG_RD 1
#define NETUP_CI_FLG_AD 1
static unsigned int ci_dbg;
module_param(ci_dbg, uint, 0644);
MODULE_PARM_DESC(ci_dbg, "Enable CI debugging");
static unsigned int pid_dbg;
module_param(pid_dbg, uint, 0644);
MODULE_PARM_DESC(pid_dbg, "Enable PID filtering debugging");
MODULE_DESCRIPTION("altera FPGA CI module");
MODULE_AUTHOR("Igor M. Liplianin <liplianin@netup.ru>");
MODULE_LICENSE("GPL");
#define ci_dbg_print(args...) \
do { \
if (ci_dbg) \
printk(KERN_DEBUG args); \
} while (0)
#define pid_dbg_print(args...) \
do { \
if (pid_dbg) \
printk(KERN_DEBUG args); \
} while (0)
struct altera_ci_state;
struct netup_hw_pid_filter;
struct fpga_internal {
void *dev;
struct mutex fpga_mutex;/* two CI's on the same fpga */
struct netup_hw_pid_filter *pid_filt[2];
struct altera_ci_state *state[2];
struct work_struct work;
int (*fpga_rw) (void *dev, int flag, int data, int rw);
int cis_used;
int filts_used;
int strt_wrk;
};
/* stores all private variables for communication with CI */
struct altera_ci_state {
struct fpga_internal *internal;
struct dvb_ca_en50221 ca;
int status;
int nr;
};
/* stores all private variables for hardware pid filtering */
struct netup_hw_pid_filter {
struct fpga_internal *internal;
struct dvb_demux *demux;
/* save old functions */
int (*start_feed)(struct dvb_demux_feed *feed);
int (*stop_feed)(struct dvb_demux_feed *feed);
int status;
int nr;
};
/* internal params node */
struct fpga_inode {
/* pointer for internal params, one for each pair of CI's */
struct fpga_internal *internal;
struct fpga_inode *next_inode;
};
/* first internal params */
static struct fpga_inode *fpga_first_inode;
/* find chip by dev */
static struct fpga_inode *find_inode(void *dev)
{
struct fpga_inode *temp_chip = fpga_first_inode;
if (temp_chip == NULL)
return temp_chip;
/*
* Search for the last fpga CI chip or
* find it by dev
*/
while ((temp_chip != NULL) &&
(temp_chip->internal->dev != dev))
temp_chip = temp_chip->next_inode;
return temp_chip;
}
/* check demux */
static struct fpga_internal *check_filter(struct fpga_internal *temp_int,
void *demux_dev, int filt_nr)
{
if (temp_int == NULL)
return NULL;
if ((temp_int->pid_filt[filt_nr]) == NULL)
return NULL;
if (temp_int->pid_filt[filt_nr]->demux == demux_dev)
return temp_int;
return NULL;
}
/* find chip by demux */
static struct fpga_inode *find_dinode(void *demux_dev)
{
struct fpga_inode *temp_chip = fpga_first_inode;
struct fpga_internal *temp_int;
/*
* Search of the last fpga CI chip or
* find it by demux
*/
while (temp_chip != NULL) {
if (temp_chip->internal != NULL) {
temp_int = temp_chip->internal;
if (check_filter(temp_int, demux_dev, 0))
break;
if (check_filter(temp_int, demux_dev, 1))
break;
}
temp_chip = temp_chip->next_inode;
}
return temp_chip;
}
/* deallocating chip */
static void remove_inode(struct fpga_internal *internal)
{
struct fpga_inode *prev_node = fpga_first_inode;
struct fpga_inode *del_node = find_inode(internal->dev);
if (del_node != NULL) {
if (del_node == fpga_first_inode) {
fpga_first_inode = del_node->next_inode;
} else {
while (prev_node->next_inode != del_node)
prev_node = prev_node->next_inode;
if (del_node->next_inode == NULL)
prev_node->next_inode = NULL;
else
prev_node->next_inode =
prev_node->next_inode->next_inode;
}
kfree(del_node);
}
}
/* allocating new chip */
static struct fpga_inode *append_internal(struct fpga_internal *internal)
{
struct fpga_inode *new_node = fpga_first_inode;
if (new_node == NULL) {
new_node = kmalloc(sizeof(struct fpga_inode), GFP_KERNEL);
fpga_first_inode = new_node;
} else {
while (new_node->next_inode != NULL)
new_node = new_node->next_inode;
new_node->next_inode =
kmalloc(sizeof(struct fpga_inode), GFP_KERNEL);
if (new_node->next_inode != NULL)
new_node = new_node->next_inode;
else
new_node = NULL;
}
if (new_node != NULL) {
new_node->internal = internal;
new_node->next_inode = NULL;
}
return new_node;
}
static int netup_fpga_op_rw(struct fpga_internal *inter, int addr,
u8 val, u8 read)
{
inter->fpga_rw(inter->dev, NETUP_CI_FLG_AD, addr, 0);
return inter->fpga_rw(inter->dev, 0, val, read);
}
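/*
* Every FPGA access below follows this two-phase pattern: first latch the
* register address with NETUP_CI_FLG_AD, then transfer the data byte.
* Reading the revision register, for instance, is simply:
*
*	rev = netup_fpga_op_rw(inter, NETUP_CI_REVISION, 0, NETUP_CI_FLG_RD);
*/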
/* flag - mem/io, read - read/write */
static int altera_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
u8 flag, u8 read, int addr, u8 val)
{
struct altera_ci_state *state = en50221->data;
struct fpga_internal *inter = state->internal;
u8 store;
int mem = 0;
if (0 != slot)
return -EINVAL;
mutex_lock(&inter->fpga_mutex);
netup_fpga_op_rw(inter, NETUP_CI_ADDR0, ((addr << 1) & 0xfe), 0);
netup_fpga_op_rw(inter, NETUP_CI_ADDR1, ((addr >> 7) & 0x7f), 0);
store = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, 0, NETUP_CI_FLG_RD);
store &= 0x0f;
store |= ((state->nr << 7) | (flag << 6));
netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, store, 0);
mem = netup_fpga_op_rw(inter, NETUP_CI_DATA, val, read);
mutex_unlock(&inter->fpga_mutex);
ci_dbg_print("%s: %s: addr=[0x%02x], %s=%x\n", __func__,
(read) ? "read" : "write", addr,
(flag == NETUP_CI_FLG_CTL) ? "ctl" : "mem",
(read) ? mem : val);
return mem;
}
static int altera_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
int slot, int addr)
{
return altera_ci_op_cam(en50221, slot, 0, NETUP_CI_FLG_RD, addr, 0);
}
static int altera_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
int slot, int addr, u8 data)
{
return altera_ci_op_cam(en50221, slot, 0, 0, addr, data);
}
static int altera_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221,
int slot, u8 addr)
{
return altera_ci_op_cam(en50221, slot, NETUP_CI_FLG_CTL,
NETUP_CI_FLG_RD, addr, 0);
}
static int altera_ci_write_cam_ctl(struct dvb_ca_en50221 *en50221, int slot,
u8 addr, u8 data)
{
return altera_ci_op_cam(en50221, slot, NETUP_CI_FLG_CTL, 0, addr, data);
}
static int altera_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
{
struct altera_ci_state *state = en50221->data;
struct fpga_internal *inter = state->internal;
/* reasonable timeout for CI reset is 10 seconds */
unsigned long t_out = jiffies + msecs_to_jiffies(9999);
int ret;
ci_dbg_print("%s\n", __func__);
if (0 != slot)
return -EINVAL;
mutex_lock(&inter->fpga_mutex);
ret = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, 0, NETUP_CI_FLG_RD);
netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL,
(ret & 0xcf) | (1 << (5 - state->nr)), 0);
mutex_unlock(&inter->fpga_mutex);
for (;;) {
mdelay(50);
mutex_lock(&inter->fpga_mutex);
ret = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL,
0, NETUP_CI_FLG_RD);
mutex_unlock(&inter->fpga_mutex);
if ((ret & (1 << (5 - state->nr))) == 0)
break;
if (time_after(jiffies, t_out))
break;
}
ci_dbg_print("%s: %d msecs\n", __func__,
jiffies_to_msecs(jiffies + msecs_to_jiffies(9999) - t_out));
return 0;
}
static int altera_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
{
/* not implemented */
return 0;
}
static int altera_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221, int slot)
{
struct altera_ci_state *state = en50221->data;
struct fpga_internal *inter = state->internal;
int ret;
ci_dbg_print("%s\n", __func__);
if (0 != slot)
return -EINVAL;
mutex_lock(&inter->fpga_mutex);
ret = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, 0, NETUP_CI_FLG_RD);
netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL,
(ret & 0x0f) | (1 << (3 - state->nr)), 0);
mutex_unlock(&inter->fpga_mutex);
return 0;
}
/* work handler */
static void netup_read_ci_status(struct work_struct *work)
{
struct fpga_internal *inter =
container_of(work, struct fpga_internal, work);
int ret;
ci_dbg_print("%s\n", __func__);
mutex_lock(&inter->fpga_mutex);
/* ack' irq */
ret = netup_fpga_op_rw(inter, NETUP_CI_INT_CTRL, 0, NETUP_CI_FLG_RD);
ret = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, 0, NETUP_CI_FLG_RD);
mutex_unlock(&inter->fpga_mutex);
if (inter->state[1] != NULL) {
inter->state[1]->status =
((ret & 1) == 0 ?
DVB_CA_EN50221_POLL_CAM_PRESENT |
DVB_CA_EN50221_POLL_CAM_READY : 0);
ci_dbg_print("%s: setting CI[1] status = 0x%x\n",
__func__, inter->state[1]->status);
}
if (inter->state[0] != NULL) {
inter->state[0]->status =
((ret & 2) == 0 ?
DVB_CA_EN50221_POLL_CAM_PRESENT |
DVB_CA_EN50221_POLL_CAM_READY : 0);
ci_dbg_print("%s: setting CI[0] status = 0x%x\n",
__func__, inter->state[0]->status);
}
}
/* CI irq handler */
int altera_ci_irq(void *dev)
{
struct fpga_inode *temp_int = NULL;
struct fpga_internal *inter = NULL;
ci_dbg_print("%s\n", __func__);
if (dev != NULL) {
temp_int = find_inode(dev);
if (temp_int != NULL) {
inter = temp_int->internal;
schedule_work(&inter->work);
}
}
return 1;
}
EXPORT_SYMBOL(altera_ci_irq);
static int altera_poll_ci_slot_status(struct dvb_ca_en50221 *en50221,
int slot, int open)
{
struct altera_ci_state *state = en50221->data;
if (0 != slot)
return -EINVAL;
return state->status;
}
static void altera_hw_filt_release(void *main_dev, int filt_nr)
{
struct fpga_inode *temp_int = find_inode(main_dev);
struct netup_hw_pid_filter *pid_filt = NULL;
ci_dbg_print("%s\n", __func__);
if (temp_int != NULL) {
pid_filt = temp_int->internal->pid_filt[filt_nr - 1];
/* stored old feed controls */
pid_filt->demux->start_feed = pid_filt->start_feed;
pid_filt->demux->stop_feed = pid_filt->stop_feed;
if (((--(temp_int->internal->filts_used)) <= 0) &&
((temp_int->internal->cis_used) <= 0)) {
ci_dbg_print("%s: Actually removing\n", __func__);
remove_inode(temp_int->internal);
kfree(pid_filt->internal);
}
kfree(pid_filt);
}
}
EXPORT_SYMBOL(altera_hw_filt_release);
void altera_ci_release(void *dev, int ci_nr)
{
struct fpga_inode *temp_int = find_inode(dev);
struct altera_ci_state *state = NULL;
ci_dbg_print("%s\n", __func__);
if (temp_int != NULL) {
state = temp_int->internal->state[ci_nr - 1];
altera_hw_filt_release(dev, ci_nr);
if (((temp_int->internal->filts_used) <= 0) &&
((--(temp_int->internal->cis_used)) <= 0)) {
ci_dbg_print("%s: Actually removing\n", __func__);
remove_inode(temp_int->internal);
kfree(state->internal);
}
if (state != NULL) {
if (state->ca.data != NULL)
dvb_ca_en50221_release(&state->ca);
kfree(state);
}
}
}
EXPORT_SYMBOL(altera_ci_release);
static void altera_pid_control(struct netup_hw_pid_filter *pid_filt,
u16 pid, int onoff)
{
struct fpga_internal *inter = pid_filt->internal;
u8 store = 0;
/* pid 0-0x1f always enabled, don't touch them */
if ((pid == 0x2000) || (pid < 0x20))
return;
mutex_lock(&inter->fpga_mutex);
netup_fpga_op_rw(inter, NETUP_CI_PID_ADDR0, (pid >> 3) & 0xff, 0);
netup_fpga_op_rw(inter, NETUP_CI_PID_ADDR1,
((pid >> 11) & 0x03) | (pid_filt->nr << 2), 0);
store = netup_fpga_op_rw(inter, NETUP_CI_PID_DATA, 0, NETUP_CI_FLG_RD);
if (onoff) /* 0 - on, 1 - off */
store |= (1 << (pid & 7));
else
store &= ~(1 << (pid & 7));
netup_fpga_op_rw(inter, NETUP_CI_PID_DATA, store, 0);
mutex_unlock(&inter->fpga_mutex);
pid_dbg_print("%s: (%d) set pid: %5d 0x%04x '%s'\n", __func__,
pid_filt->nr, pid, pid, onoff ? "off" : "on");
}
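/*
* Worked example of the PID filter addressing above: for pid 0x1234 on
* filter 0, ADDR0 = (0x1234 >> 3) & 0xff = 0x46, ADDR1 carries pid bits
* 12-11 (here 0x02) plus the filter number, and bit 0x1234 & 7 = 4 of the
* data byte gates this particular PID.
*/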
static void altera_toggle_fullts_streaming(struct netup_hw_pid_filter *pid_filt,
int filt_nr, int onoff)
{
struct fpga_internal *inter = pid_filt->internal;
u8 store = 0;
int i;
pid_dbg_print("%s: pid_filt->nr[%d] now %s\n", __func__, pid_filt->nr,
onoff ? "off" : "on");
if (onoff) /* 0 - on, 1 - off */
store = 0xff;/* ignore pid */
else
store = 0;/* enable pid */
mutex_lock(&inter->fpga_mutex);
for (i = 0; i < 1024; i++) {
netup_fpga_op_rw(inter, NETUP_CI_PID_ADDR0, i & 0xff, 0);
netup_fpga_op_rw(inter, NETUP_CI_PID_ADDR1,
((i >> 8) & 0x03) | (pid_filt->nr << 2), 0);
/* pid 0-0x1f always enabled */
netup_fpga_op_rw(inter, NETUP_CI_PID_DATA,
(i > 3 ? store : 0), 0);
}
mutex_unlock(&inter->fpga_mutex);
}
static int altera_pid_feed_control(void *demux_dev, int filt_nr,
struct dvb_demux_feed *feed, int onoff)
{
struct fpga_inode *temp_int = find_dinode(demux_dev);
struct fpga_internal *inter = temp_int->internal;
struct netup_hw_pid_filter *pid_filt = inter->pid_filt[filt_nr - 1];
altera_pid_control(pid_filt, feed->pid, onoff ? 0 : 1);
/* call old feed proc's */
if (onoff)
pid_filt->start_feed(feed);
else
pid_filt->stop_feed(feed);
if (feed->pid == 0x2000)
altera_toggle_fullts_streaming(pid_filt, filt_nr,
onoff ? 0 : 1);
return 0;
}
EXPORT_SYMBOL(altera_pid_feed_control);
static int altera_ci_start_feed(struct dvb_demux_feed *feed, int num)
{
altera_pid_feed_control(feed->demux, num, feed, 1);
return 0;
}
static int altera_ci_stop_feed(struct dvb_demux_feed *feed, int num)
{
altera_pid_feed_control(feed->demux, num, feed, 0);
return 0;
}
static int altera_ci_start_feed_1(struct dvb_demux_feed *feed)
{
return altera_ci_start_feed(feed, 1);
}
static int altera_ci_stop_feed_1(struct dvb_demux_feed *feed)
{
return altera_ci_stop_feed(feed, 1);
}
static int altera_ci_start_feed_2(struct dvb_demux_feed *feed)
{
return altera_ci_start_feed(feed, 2);
}
static int altera_ci_stop_feed_2(struct dvb_demux_feed *feed)
{
return altera_ci_stop_feed(feed, 2);
}
static int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr)
{
struct netup_hw_pid_filter *pid_filt = NULL;
struct fpga_inode *temp_int = find_inode(config->dev);
struct fpga_internal *inter = NULL;
int ret = 0;
pid_filt = kzalloc(sizeof(struct netup_hw_pid_filter), GFP_KERNEL);
ci_dbg_print("%s\n", __func__);
if (!pid_filt) {
ret = -ENOMEM;
goto err;
}
if (temp_int != NULL) {
inter = temp_int->internal;
(inter->filts_used)++;
ci_dbg_print("%s: Find Internal Structure!\n", __func__);
} else {
inter = kzalloc(sizeof(struct fpga_internal), GFP_KERNEL);
if (!inter) {
ret = -ENOMEM;
goto err;
}
temp_int = append_internal(inter);
inter->filts_used = 1;
inter->dev = config->dev;
inter->fpga_rw = config->fpga_rw;
mutex_init(&inter->fpga_mutex);
inter->strt_wrk = 1;
ci_dbg_print("%s: Create New Internal Structure!\n", __func__);
}
ci_dbg_print("%s: setting hw pid filter = %p for ci = %d\n", __func__,
pid_filt, hw_filt_nr - 1);
inter->pid_filt[hw_filt_nr - 1] = pid_filt;
pid_filt->demux = config->demux;
pid_filt->internal = inter;
pid_filt->nr = hw_filt_nr - 1;
/* store old feed controls */
pid_filt->start_feed = config->demux->start_feed;
pid_filt->stop_feed = config->demux->stop_feed;
/* replace with new feed controls */
if (hw_filt_nr == 1) {
pid_filt->demux->start_feed = altera_ci_start_feed_1;
pid_filt->demux->stop_feed = altera_ci_stop_feed_1;
} else if (hw_filt_nr == 2) {
pid_filt->demux->start_feed = altera_ci_start_feed_2;
pid_filt->demux->stop_feed = altera_ci_stop_feed_2;
}
altera_toggle_fullts_streaming(pid_filt, 0, 1);
return 0;
err:
ci_dbg_print("%s: Can't init hardware filter: Error %d\n",
__func__, ret);
kfree(pid_filt);
return ret;
}
EXPORT_SYMBOL(altera_hw_filt_init);
int altera_ci_init(struct altera_ci_config *config, int ci_nr)
{
struct altera_ci_state *state;
struct fpga_inode *temp_int = find_inode(config->dev);
struct fpga_internal *inter = NULL;
int ret = 0;
u8 store = 0;
state = kzalloc(sizeof(struct altera_ci_state), GFP_KERNEL);
ci_dbg_print("%s\n", __func__);
if (!state) {
ret = -ENOMEM;
goto err;
}
if (temp_int != NULL) {
inter = temp_int->internal;
(inter->cis_used)++;
inter->fpga_rw = config->fpga_rw;
ci_dbg_print("%s: Find Internal Structure!\n", __func__);
} else {
inter = kzalloc(sizeof(struct fpga_internal), GFP_KERNEL);
if (!inter) {
ret = -ENOMEM;
goto err;
}
temp_int = append_internal(inter);
inter->cis_used = 1;
inter->dev = config->dev;
inter->fpga_rw = config->fpga_rw;
mutex_init(&inter->fpga_mutex);
inter->strt_wrk = 1;
ci_dbg_print("%s: Create New Internal Structure!\n", __func__);
}
ci_dbg_print("%s: setting state = %p for ci = %d\n", __func__,
state, ci_nr - 1);
state->internal = inter;
state->nr = ci_nr - 1;
state->ca.owner = THIS_MODULE;
state->ca.read_attribute_mem = altera_ci_read_attribute_mem;
state->ca.write_attribute_mem = altera_ci_write_attribute_mem;
state->ca.read_cam_control = altera_ci_read_cam_ctl;
state->ca.write_cam_control = altera_ci_write_cam_ctl;
state->ca.slot_reset = altera_ci_slot_reset;
state->ca.slot_shutdown = altera_ci_slot_shutdown;
state->ca.slot_ts_enable = altera_ci_slot_ts_ctl;
state->ca.poll_slot_status = altera_poll_ci_slot_status;
state->ca.data = state;
ret = dvb_ca_en50221_init(config->adapter,
&state->ca,
/* flags */ 0,
/* n_slots */ 1);
if (0 != ret)
goto err;
inter->state[ci_nr - 1] = state;
altera_hw_filt_init(config, ci_nr);
if (inter->strt_wrk) {
INIT_WORK(&inter->work, netup_read_ci_status);
inter->strt_wrk = 0;
}
ci_dbg_print("%s: CI initialized!\n", __func__);
mutex_lock(&inter->fpga_mutex);
/* Enable div */
netup_fpga_op_rw(inter, NETUP_CI_TSA_DIV, 0x0, 0);
netup_fpga_op_rw(inter, NETUP_CI_TSB_DIV, 0x0, 0);
/* enable TS out */
store = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, 0, NETUP_CI_FLG_RD);
store |= (3 << 4);
netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, store, 0);
ret = netup_fpga_op_rw(inter, NETUP_CI_REVISION, 0, NETUP_CI_FLG_RD);
/* enable irq */
netup_fpga_op_rw(inter, NETUP_CI_INT_CTRL, 0x44, 0);
mutex_unlock(&inter->fpga_mutex);
ci_dbg_print("%s: NetUP CI Revision = 0x%x\n", __func__, ret);
schedule_work(&inter->work);
return 0;
err:
ci_dbg_print("%s: Cannot initialize CI: Error %d.\n", __func__, ret);
kfree(state);
return ret;
}
EXPORT_SYMBOL(altera_ci_init);
int altera_ci_tuner_reset(void *dev, int ci_nr)
{
struct fpga_inode *temp_int = find_inode(dev);
struct fpga_internal *inter = NULL;
u8 store;
ci_dbg_print("%s\n", __func__);
if (temp_int == NULL)
return -1;
if (temp_int->internal == NULL)
return -1;
inter = temp_int->internal;
mutex_lock(&inter->fpga_mutex);
store = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, 0, NETUP_CI_FLG_RD);
store &= ~(4 << (2 - ci_nr));
netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, store, 0);
msleep(100);
store |= (4 << (2 - ci_nr));
netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, store, 0);
mutex_unlock(&inter->fpga_mutex);
return 0;
}
EXPORT_SYMBOL(altera_ci_tuner_reset);
| gpl-2.0 |
lizhm82/p4a-kernel | net/x25/x25_dev.c | 2945 | 4517 | /*
* X.25 Packet Layer release 002
*
* This is ALPHA test software. This code may break your machine, randomly fail to work with new
* releases, misbehave and/or generally screw up. It might even work.
*
* This code REQUIRES 2.1.15 or higher
*
* This module:
* This module is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* History
* X.25 001 Jonathan Naylor Started coding.
* 2000-09-04 Henner Eisen Prevent freeing a dangling skb.
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/if_arp.h>
#include <net/x25.h>
#include <net/x25device.h>
static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
{
struct sock *sk;
unsigned short frametype;
unsigned int lci;
frametype = skb->data[2];
lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
/*
* LCI of zero is always for us, and it's always a link control
* frame.
*/
if (lci == 0) {
x25_link_control(skb, nb, frametype);
return 0;
}
/*
* Find an existing socket.
*/
if ((sk = x25_find_socket(lci, nb)) != NULL) {
int queued = 1;
skb_reset_transport_header(skb);
bh_lock_sock(sk);
if (!sock_owned_by_user(sk)) {
queued = x25_process_rx_frame(sk, skb);
} else {
queued = !sk_add_backlog(sk, skb);
}
bh_unlock_sock(sk);
sock_put(sk);
return queued;
}
/*
* Is it a Call Request? If so, process it.
*/
if (frametype == X25_CALL_REQUEST)
return x25_rx_call_request(skb, nb, lci);
/*
* It's not a Call Request, nor is it a control frame.
* Can we forward it?
*/
if (x25_forward_data(lci, nb, skb)) {
if (frametype == X25_CLEAR_CONFIRMATION) {
x25_clear_forward_by_lci(lci);
}
kfree_skb(skb);
return 1;
}
/*
x25_transmit_clear_request(nb, lci, 0x0D);
*/
if (frametype != X25_CLEAR_CONFIRMATION)
printk(KERN_DEBUG "x25_receive_data(): unknown frame type %2x\n",frametype);
return 0;
}
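/*
* Worked example of the LCI extraction in x25_receive_data(): the logical
* channel identifier is 12 bits spread over the first two octets, the low
* nibble of byte 0 holding the high four bits. For
* skb->data[] = { 0x1A, 0x2B, 0x0B, ... }:
*
*	lci       = ((0x1A << 8) & 0xF00) + (0x2B & 0x0FF) = 0xA2B
*	frametype = skb->data[2]                           = 0x0B
*/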
int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype, struct net_device *orig_dev)
{
struct sk_buff *nskb;
struct x25_neigh *nb;
if (!net_eq(dev_net(dev), &init_net))
goto drop;
nskb = skb_copy(skb, GFP_ATOMIC);
if (!nskb)
goto drop;
kfree_skb(skb);
skb = nskb;
/*
* Packet received from unrecognised device, throw it away.
*/
nb = x25_get_neigh(dev);
if (!nb) {
printk(KERN_DEBUG "X.25: unknown neighbour - %s\n", dev->name);
goto drop;
}
switch (skb->data[0]) {
case X25_IFACE_DATA:
skb_pull(skb, 1);
if (x25_receive_data(skb, nb)) {
x25_neigh_put(nb);
goto out;
}
break;
case X25_IFACE_CONNECT:
x25_link_established(nb);
break;
case X25_IFACE_DISCONNECT:
x25_link_terminated(nb);
break;
}
x25_neigh_put(nb);
drop:
kfree_skb(skb);
out:
return 0;
}
void x25_establish_link(struct x25_neigh *nb)
{
struct sk_buff *skb;
unsigned char *ptr;
switch (nb->dev->type) {
case ARPHRD_X25:
if ((skb = alloc_skb(1, GFP_ATOMIC)) == NULL) {
printk(KERN_ERR "x25_dev: out of memory\n");
return;
}
ptr = skb_put(skb, 1);
*ptr = X25_IFACE_CONNECT;
break;
#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
case ARPHRD_ETHER:
return;
#endif
default:
return;
}
skb->protocol = htons(ETH_P_X25);
skb->dev = nb->dev;
dev_queue_xmit(skb);
}
void x25_terminate_link(struct x25_neigh *nb)
{
struct sk_buff *skb;
unsigned char *ptr;
#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
if (nb->dev->type == ARPHRD_ETHER)
return;
#endif
if (nb->dev->type != ARPHRD_X25)
return;
skb = alloc_skb(1, GFP_ATOMIC);
if (!skb) {
printk(KERN_ERR "x25_dev: out of memory\n");
return;
}
ptr = skb_put(skb, 1);
*ptr = X25_IFACE_DISCONNECT;
skb->protocol = htons(ETH_P_X25);
skb->dev = nb->dev;
dev_queue_xmit(skb);
}
void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb)
{
unsigned char *dptr;
skb_reset_network_header(skb);
switch (nb->dev->type) {
case ARPHRD_X25:
dptr = skb_push(skb, 1);
*dptr = X25_IFACE_DATA;
break;
#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
case ARPHRD_ETHER:
kfree_skb(skb);
return;
#endif
default:
kfree_skb(skb);
return;
}
skb->protocol = htons(ETH_P_X25);
skb->dev = nb->dev;
dev_queue_xmit(skb);
}
| gpl-2.0 |
insofter/linux | sound/soc/s6000/s6105-ipcam.c | 3201 | 6683 | /*
* ASoC driver for Stretch s6105 IP camera platform
*
* Author: Daniel Gloeckner, <dg@emlix.com>
* Copyright: (C) 2009 emlix GmbH <info@emlix.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <variant/dmac.h>
#include "s6000-pcm.h"
#include "s6000-i2s.h"
#define S6105_CAM_CODEC_CLOCK 12288000
static int s6105_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int ret = 0;
/* set codec DAI configuration */
ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_CBM_CFM);
if (ret < 0)
return ret;
/* set cpu DAI configuration */
ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBM_CFM |
SND_SOC_DAIFMT_NB_NF);
if (ret < 0)
return ret;
/* set the codec system clock */
ret = snd_soc_dai_set_sysclk(codec_dai, 0, S6105_CAM_CODEC_CLOCK,
SND_SOC_CLOCK_OUT);
if (ret < 0)
return ret;
return 0;
}
static struct snd_soc_ops s6105_ops = {
.hw_params = s6105_hw_params,
};
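/*
* Note on the clocking above: S6105_CAM_CODEC_CLOCK (12288000 Hz) is the
* common 256 * fs master clock for 48 kHz audio (256 * 48000 = 12288000);
* s6105_hw_params() hands it to the codec DAI, and the aic3x codec driver
* derives its internal rates from it.
*/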
/* s6105 machine dapm widgets */
static const struct snd_soc_dapm_widget aic3x_dapm_widgets[] = {
SND_SOC_DAPM_LINE("Audio Out Differential", NULL),
SND_SOC_DAPM_LINE("Audio Out Stereo", NULL),
SND_SOC_DAPM_LINE("Audio In", NULL),
};
/* s6105 machine audio_map connections to the codec pins */
static const struct snd_soc_dapm_route audio_map[] = {
/* Audio Out connected to HPLOUT, HPLCOM, HPROUT */
{"Audio Out Differential", NULL, "HPLOUT"},
{"Audio Out Differential", NULL, "HPLCOM"},
{"Audio Out Stereo", NULL, "HPLOUT"},
{"Audio Out Stereo", NULL, "HPROUT"},
/* Audio In connected to LINE1L, LINE1R */
{"LINE1L", NULL, "Audio In"},
{"LINE1R", NULL, "Audio In"},
};
static int output_type_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
uinfo->value.enumerated.items = 2;
if (uinfo->value.enumerated.item) {
uinfo->value.enumerated.item = 1;
strcpy(uinfo->value.enumerated.name, "HPLOUT/HPROUT");
} else {
strcpy(uinfo->value.enumerated.name, "HPLOUT/HPLCOM");
}
return 0;
}
static int output_type_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.enumerated.item[0] = kcontrol->private_value;
return 0;
}
static int output_type_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = kcontrol->private_data;
struct snd_soc_dapm_context *dapm = &codec->dapm;
unsigned int val = (ucontrol->value.enumerated.item[0] != 0);
char *differential = "Audio Out Differential";
char *stereo = "Audio Out Stereo";
if (kcontrol->private_value == val)
return 0;
kcontrol->private_value = val;
snd_soc_dapm_disable_pin(dapm, val ? differential : stereo);
snd_soc_dapm_sync(dapm);
snd_soc_dapm_enable_pin(dapm, val ? stereo : differential);
snd_soc_dapm_sync(dapm);
return 1;
}
static const struct snd_kcontrol_new audio_out_mux = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Output Mux",
.index = 0,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
.info = output_type_info,
.get = output_type_get,
.put = output_type_put,
.private_value = 1 /* default to stereo */
};
/* Logic for an aic3x as connected on the s6105 ip camera ref design */
static int s6105_aic3x_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
/* Add s6105 specific widgets */
snd_soc_dapm_new_controls(dapm, aic3x_dapm_widgets,
ARRAY_SIZE(aic3x_dapm_widgets));
/* Set up s6105 specific audio path audio_map */
snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
/* not present */
snd_soc_dapm_nc_pin(dapm, "MONO_LOUT");
snd_soc_dapm_nc_pin(dapm, "LINE2L");
snd_soc_dapm_nc_pin(dapm, "LINE2R");
/* not connected */
snd_soc_dapm_nc_pin(dapm, "MIC3L"); /* LINE2L on this chip */
snd_soc_dapm_nc_pin(dapm, "MIC3R"); /* LINE2R on this chip */
snd_soc_dapm_nc_pin(dapm, "LLOUT");
snd_soc_dapm_nc_pin(dapm, "RLOUT");
snd_soc_dapm_nc_pin(dapm, "HPRCOM");
/* always connected */
snd_soc_dapm_enable_pin(dapm, "Audio In");
/* must correspond to audio_out_mux.private_value initializer */
snd_soc_dapm_disable_pin(dapm, "Audio Out Differential");
snd_soc_dapm_sync(dapm);
snd_soc_dapm_enable_pin(dapm, "Audio Out Stereo");
snd_soc_dapm_sync(dapm);
snd_ctl_add(codec->card->snd_card, snd_ctl_new1(&audio_out_mux, codec));
return 0;
}
/* s6105 digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link s6105_dai = {
.name = "TLV320AIC31",
.stream_name = "AIC31",
.cpu_dai_name = "s6000-i2s",
.codec_dai_name = "tlv320aic3x-hifi",
.platform_name = "s6000-pcm-audio",
.codec_name = "tlv320aic3x-codec.0-001a",
.init = s6105_aic3x_init,
.ops = &s6105_ops,
};
/* s6105 audio machine driver */
static struct snd_soc_card snd_soc_card_s6105 = {
.name = "Stretch IP Camera",
.dai_link = &s6105_dai,
.num_links = 1,
};
static struct s6000_snd_platform_data __initdata s6105_snd_data = {
.wide = 0,
.channel_in = 0,
.channel_out = 1,
.lines_in = 1,
.lines_out = 1,
.same_rate = 1,
};
static struct platform_device *s6105_snd_device;
/* temporary i2c device creation until this can be moved into the machine
* support file.
*/
static struct i2c_board_info i2c_device[] = {
{ I2C_BOARD_INFO("tlv320aic33", 0x18), }
};
static int __init s6105_init(void)
{
int ret;
i2c_register_board_info(0, i2c_device, ARRAY_SIZE(i2c_device));
s6105_snd_device = platform_device_alloc("soc-audio", -1);
if (!s6105_snd_device)
return -ENOMEM;
platform_set_drvdata(s6105_snd_device, &snd_soc_card_s6105);
platform_device_add_data(s6105_snd_device, &s6105_snd_data,
sizeof(s6105_snd_data));
ret = platform_device_add(s6105_snd_device);
if (ret)
platform_device_put(s6105_snd_device);
return ret;
}
static void __exit s6105_exit(void)
{
platform_device_unregister(s6105_snd_device);
}
module_init(s6105_init);
module_exit(s6105_exit);
MODULE_AUTHOR("Daniel Gloeckner");
MODULE_DESCRIPTION("Stretch s6105 IP camera ASoC driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
TeamGlide/android_kernel_htc_msm7x30 | drivers/staging/bcm/hostmibs.c | 3201 | 8463 |
/*
* File Name: hostmibs.c
*
* Author: Beceem Communications Pvt. Ltd
*
* Abstract: This file contains the routines to copy the statistics used by
* the driver into the Host MIBS structure and hand them to the
* application.
*
*/
#include "headers.h"
INT ProcessGetHostMibs(PMINI_ADAPTER Adapter, S_MIBS_HOST_STATS_MIBS *pstHostMibs)
{
S_SERVICEFLOW_ENTRY *pstServiceFlowEntry = NULL;
S_PHS_RULE *pstPhsRule = NULL;
S_CLASSIFIER_TABLE *pstClassifierTable = NULL;
S_CLASSIFIER_ENTRY *pstClassifierRule = NULL;
PPHS_DEVICE_EXTENSION pDeviceExtension = (PPHS_DEVICE_EXTENSION)&Adapter->stBCMPhsContext;
UINT nClassifierIndex = 0, nPhsTableIndex = 0,nSfIndex = 0, uiIndex = 0;
if(pDeviceExtension == NULL)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, HOST_MIBS, DBG_LVL_ALL, "Invalid Device Extension\n");
return STATUS_FAILURE;
}
//Copy the classifier Table
for(nClassifierIndex=0; nClassifierIndex < MAX_CLASSIFIERS;
nClassifierIndex++)
{
if(Adapter->astClassifierTable[nClassifierIndex].bUsed == TRUE)
memcpy((PVOID)&pstHostMibs->astClassifierTable[nClassifierIndex],
(PVOID)&Adapter->astClassifierTable[nClassifierIndex],
sizeof(S_MIBS_CLASSIFIER_RULE));
}
//Copy the SF Table
for(nSfIndex=0; nSfIndex < NO_OF_QUEUES ; nSfIndex++)
{
if(Adapter->PackInfo[nSfIndex].bValid)
{
memcpy((PVOID)&pstHostMibs->astSFtable[nSfIndex],(PVOID)&Adapter->PackInfo[nSfIndex],sizeof(S_MIBS_SERVICEFLOW_TABLE));
}
else
{
//if the index is not valid, don't process it for the PHS table; go on to the next entry.
continue ;
}
//Retrieve the SFID Entry Index for requested Service Flow
if(PHS_INVALID_TABLE_INDEX == GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
Adapter->PackInfo[nSfIndex].usVCID_Value ,&pstServiceFlowEntry))
{
continue;
}
pstClassifierTable = pstServiceFlowEntry->pstClassifierTable;
for(uiIndex = 0; uiIndex < MAX_PHSRULE_PER_SF; uiIndex++)
{
pstClassifierRule = &pstClassifierTable->stActivePhsRulesList[uiIndex];
if(pstClassifierRule->bUsed)
{
pstPhsRule = pstClassifierRule->pstPhsRule;
pstHostMibs->astPhsRulesTable[nPhsTableIndex].ulSFID = Adapter->PackInfo[nSfIndex].ulSFID;
memcpy(&pstHostMibs->astPhsRulesTable[nPhsTableIndex].u8PHSI,
&pstPhsRule->u8PHSI,
sizeof(S_PHS_RULE));
nPhsTableIndex++;
}
}
}
//copy other Host Statistics parameters
pstHostMibs->stHostInfo.GoodTransmits = Adapter->dev->stats.tx_packets;
pstHostMibs->stHostInfo.GoodReceives = Adapter->dev->stats.rx_packets;
pstHostMibs->stHostInfo.CurrNumFreeDesc =
atomic_read(&Adapter->CurrNumFreeTxDesc);
pstHostMibs->stHostInfo.BEBucketSize = Adapter->BEBucketSize;
pstHostMibs->stHostInfo.rtPSBucketSize = Adapter->rtPSBucketSize;
pstHostMibs->stHostInfo.TimerActive = Adapter->TimerActive;
pstHostMibs->stHostInfo.u32TotalDSD = Adapter->u32TotalDSD;
memcpy(pstHostMibs->stHostInfo.aTxPktSizeHist,Adapter->aTxPktSizeHist,sizeof(UINT32)*MIBS_MAX_HIST_ENTRIES);
memcpy(pstHostMibs->stHostInfo.aRxPktSizeHist,Adapter->aRxPktSizeHist,sizeof(UINT32)*MIBS_MAX_HIST_ENTRIES);
return STATUS_SUCCESS;
}
VOID GetDroppedAppCntrlPktMibs(S_MIBS_HOST_STATS_MIBS *pstHostMibs, const PPER_TARANG_DATA pTarang)
{
memcpy(&(pstHostMibs->stDroppedAppCntrlMsgs),
&(pTarang->stDroppedAppCntrlMsgs),sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES));
}
VOID CopyMIBSExtendedSFParameters(PMINI_ADAPTER Adapter,
CServiceFlowParamSI *psfLocalSet, UINT uiSearchRuleIndex)
{
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfSfid = psfLocalSet->u32SFID;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMaxSustainedRate = psfLocalSet->u32MaxSustainedTrafficRate;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMaxTrafficBurst = psfLocalSet->u32MaxTrafficBurst;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMinReservedRate = psfLocalSet->u32MinReservedTrafficRate;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsToleratedJitter = psfLocalSet->u32ToleratedJitter;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMaxLatency = psfLocalSet->u32MaximumLatency;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsFixedVsVariableSduInd = psfLocalSet->u8FixedLengthVSVariableLengthSDUIndicator;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsFixedVsVariableSduInd = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsFixedVsVariableSduInd);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSduSize = psfLocalSet->u8SDUSize;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSduSize = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSduSize);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSfSchedulingType = psfLocalSet->u8ServiceFlowSchedulingType;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSfSchedulingType = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSfSchedulingType);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqEnable = psfLocalSet->u8ARQEnable;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqEnable = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqEnable);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqWindowSize = ntohs(psfLocalSet->u16ARQWindowSize);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqWindowSize = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqWindowSize);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockLifetime = ntohs(psfLocalSet->u16ARQBlockLifeTime);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockLifetime = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockLifetime);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqSyncLossTimeout = ntohs(psfLocalSet->u16ARQSyncLossTimeOut);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqSyncLossTimeout = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqSyncLossTimeout);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqDeliverInOrder = psfLocalSet->u8ARQDeliverInOrder;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqDeliverInOrder = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqDeliverInOrder);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqRxPurgeTimeout = ntohs(psfLocalSet->u16ARQRxPurgeTimeOut);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqRxPurgeTimeout = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqRxPurgeTimeout);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockSize = ntohs(psfLocalSet->u16ARQBlockSize);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockSize = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockSize);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsReqTxPolicy = psfLocalSet->u8RequesttransmissionPolicy;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsReqTxPolicy = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsReqTxPolicy);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnSfCsSpecification = psfLocalSet->u8CSSpecification;
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnSfCsSpecification = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnSfCsSpecification);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsTargetSaid = ntohs(psfLocalSet->u16TargetSAID);
Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsTargetSaid = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsTargetSaid);
}
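/*
* The repetitive pattern above first copies each service-flow parameter
* into stMibsExtServiceFlowTable and then byte-swaps the stored field in
* place (ntohs() for 16-bit wire values, followed by ntohl() on the MIB
* field), converting the firmware's network byte order to host order one
* field at a time.
*/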
| gpl-2.0 |
lilferraro/android_kernel_samsung_lt02ltespr | fs/fuse/control.c | 4225 | 8111 | /*
FUSE: Filesystem in Userspace
Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
This program can be distributed under the terms of the GNU GPL.
See the file COPYING.
*/
#include "fuse_i.h"
#include <linux/init.h>
#include <linux/module.h>
#define FUSE_CTL_SUPER_MAGIC 0x65735543
/*
* This is non-NULL when the single instance of the control filesystem
* exists. Protected by fuse_mutex
*/
static struct super_block *fuse_control_sb;
static struct fuse_conn *fuse_ctl_file_conn_get(struct file *file)
{
struct fuse_conn *fc;
mutex_lock(&fuse_mutex);
fc = file->f_path.dentry->d_inode->i_private;
if (fc)
fc = fuse_conn_get(fc);
mutex_unlock(&fuse_mutex);
return fc;
}
static ssize_t fuse_conn_abort_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
if (fc) {
fuse_abort_conn(fc);
fuse_conn_put(fc);
}
return count;
}
static ssize_t fuse_conn_waiting_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos)
{
char tmp[32];
size_t size;
if (!*ppos) {
long value;
struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
if (!fc)
return 0;
value = atomic_read(&fc->num_waiting);
file->private_data = (void *)value;
fuse_conn_put(fc);
}
size = sprintf(tmp, "%ld\n", (long)file->private_data);
return simple_read_from_buffer(buf, len, ppos, tmp, size);
}
static ssize_t fuse_conn_limit_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos, unsigned val)
{
char tmp[32];
size_t size = sprintf(tmp, "%u\n", val);
return simple_read_from_buffer(buf, len, ppos, tmp, size);
}
static ssize_t fuse_conn_limit_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos, unsigned *val,
unsigned global_limit)
{
unsigned long t;
char tmp[32];
unsigned limit = (1 << 16) - 1;
int err;
if (*ppos || count >= sizeof(tmp) - 1)
return -EINVAL;
if (copy_from_user(tmp, buf, count))
return -EINVAL;
tmp[count] = '\0';
err = strict_strtoul(tmp, 0, &t);
if (err)
return err;
if (!capable(CAP_SYS_ADMIN))
limit = min(limit, global_limit);
if (t > limit)
return -EINVAL;
*val = t;
return count;
}
static ssize_t fuse_conn_max_background_read(struct file *file,
char __user *buf, size_t len,
loff_t *ppos)
{
struct fuse_conn *fc;
unsigned val;
fc = fuse_ctl_file_conn_get(file);
if (!fc)
return 0;
val = fc->max_background;
fuse_conn_put(fc);
return fuse_conn_limit_read(file, buf, len, ppos, val);
}
static ssize_t fuse_conn_max_background_write(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
unsigned val;
ssize_t ret;
ret = fuse_conn_limit_write(file, buf, count, ppos, &val,
max_user_bgreq);
if (ret > 0) {
struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
if (fc) {
fc->max_background = val;
fuse_conn_put(fc);
}
}
return ret;
}
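/*
* A usage sketch (illustrative): with the fusectl filesystem mounted,
* conventionally on /sys/fs/fuse/connections, the handlers above back a
* plain text file per connection, so the limit can be inspected and tuned
* from the shell (the connection number and values are hypothetical):
*
*	$ cat /sys/fs/fuse/connections/42/max_background
*	12
*	$ echo 64 > /sys/fs/fuse/connections/42/max_background
*
* Unprivileged writers are capped at the global max_user_bgreq limit by
* fuse_conn_limit_write().
*/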
static ssize_t fuse_conn_congestion_threshold_read(struct file *file,
char __user *buf, size_t len,
loff_t *ppos)
{
struct fuse_conn *fc;
unsigned val;
fc = fuse_ctl_file_conn_get(file);
if (!fc)
return 0;
val = fc->congestion_threshold;
fuse_conn_put(fc);
return fuse_conn_limit_read(file, buf, len, ppos, val);
}
static ssize_t fuse_conn_congestion_threshold_write(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
unsigned val;
ssize_t ret;
ret = fuse_conn_limit_write(file, buf, count, ppos, &val,
max_user_congthresh);
if (ret > 0) {
struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
if (fc) {
fc->congestion_threshold = val;
fuse_conn_put(fc);
}
}
return ret;
}
static const struct file_operations fuse_ctl_abort_ops = {
.open = nonseekable_open,
.write = fuse_conn_abort_write,
.llseek = no_llseek,
};
static const struct file_operations fuse_ctl_waiting_ops = {
.open = nonseekable_open,
.read = fuse_conn_waiting_read,
.llseek = no_llseek,
};
static const struct file_operations fuse_conn_max_background_ops = {
.open = nonseekable_open,
.read = fuse_conn_max_background_read,
.write = fuse_conn_max_background_write,
.llseek = no_llseek,
};
static const struct file_operations fuse_conn_congestion_threshold_ops = {
.open = nonseekable_open,
.read = fuse_conn_congestion_threshold_read,
.write = fuse_conn_congestion_threshold_write,
.llseek = no_llseek,
};
static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
struct fuse_conn *fc,
const char *name,
int mode, int nlink,
const struct inode_operations *iop,
const struct file_operations *fop)
{
struct dentry *dentry;
struct inode *inode;
BUG_ON(fc->ctl_ndents >= FUSE_CTL_NUM_DENTRIES);
dentry = d_alloc_name(parent, name);
if (!dentry)
return NULL;
fc->ctl_dentry[fc->ctl_ndents++] = dentry;
inode = new_inode(fuse_control_sb);
if (!inode)
return NULL;
inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_uid = fc->user_id;
inode->i_gid = fc->group_id;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
/* setting ->i_op to NULL is not allowed */
if (iop)
inode->i_op = iop;
inode->i_fop = fop;
set_nlink(inode, nlink);
inode->i_private = fc;
d_add(dentry, inode);
return dentry;
}
/*
* Add a connection to the control filesystem (if it exists). Caller
* must hold fuse_mutex
*/
int fuse_ctl_add_conn(struct fuse_conn *fc)
{
struct dentry *parent;
char name[32];
if (!fuse_control_sb)
return 0;
parent = fuse_control_sb->s_root;
inc_nlink(parent->d_inode);
sprintf(name, "%u", fc->dev);
parent = fuse_ctl_add_dentry(parent, fc, name, S_IFDIR | 0500, 2,
&simple_dir_inode_operations,
&simple_dir_operations);
if (!parent)
goto err;
if (!fuse_ctl_add_dentry(parent, fc, "waiting", S_IFREG | 0400, 1,
NULL, &fuse_ctl_waiting_ops) ||
!fuse_ctl_add_dentry(parent, fc, "abort", S_IFREG | 0200, 1,
NULL, &fuse_ctl_abort_ops) ||
!fuse_ctl_add_dentry(parent, fc, "max_background", S_IFREG | 0600,
1, NULL, &fuse_conn_max_background_ops) ||
!fuse_ctl_add_dentry(parent, fc, "congestion_threshold",
S_IFREG | 0600, 1, NULL,
&fuse_conn_congestion_threshold_ops))
goto err;
return 0;
err:
fuse_ctl_remove_conn(fc);
return -ENOMEM;
}
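/*
* Resulting layout, assuming the conventional mount point and a
* hypothetical connection number 42 (one directory per fuse_conn, named
* after fc->dev):
*
*	/sys/fs/fuse/connections/42/waiting               (0400)
*	/sys/fs/fuse/connections/42/abort                 (0200)
*	/sys/fs/fuse/connections/42/max_background        (0600)
*	/sys/fs/fuse/connections/42/congestion_threshold  (0600)
*/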
/*
* Remove a connection from the control filesystem (if it exists).
* Caller must hold fuse_mutex
*/
void fuse_ctl_remove_conn(struct fuse_conn *fc)
{
int i;
if (!fuse_control_sb)
return;
for (i = fc->ctl_ndents - 1; i >= 0; i--) {
struct dentry *dentry = fc->ctl_dentry[i];
dentry->d_inode->i_private = NULL;
d_drop(dentry);
dput(dentry);
}
drop_nlink(fuse_control_sb->s_root->d_inode);
}
static int fuse_ctl_fill_super(struct super_block *sb, void *data, int silent)
{
struct tree_descr empty_descr = {""};
struct fuse_conn *fc;
int err;
err = simple_fill_super(sb, FUSE_CTL_SUPER_MAGIC, &empty_descr);
if (err)
return err;
mutex_lock(&fuse_mutex);
BUG_ON(fuse_control_sb);
fuse_control_sb = sb;
list_for_each_entry(fc, &fuse_conn_list, entry) {
err = fuse_ctl_add_conn(fc);
if (err) {
fuse_control_sb = NULL;
mutex_unlock(&fuse_mutex);
return err;
}
}
mutex_unlock(&fuse_mutex);
return 0;
}
static struct dentry *fuse_ctl_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data)
{
return mount_single(fs_type, flags, raw_data, fuse_ctl_fill_super);
}
static void fuse_ctl_kill_sb(struct super_block *sb)
{
struct fuse_conn *fc;
mutex_lock(&fuse_mutex);
fuse_control_sb = NULL;
list_for_each_entry(fc, &fuse_conn_list, entry)
fc->ctl_ndents = 0;
mutex_unlock(&fuse_mutex);
kill_litter_super(sb);
}
static struct file_system_type fuse_ctl_fs_type = {
.owner = THIS_MODULE,
.name = "fusectl",
.mount = fuse_ctl_mount,
.kill_sb = fuse_ctl_kill_sb,
};
int __init fuse_ctl_init(void)
{
return register_filesystem(&fuse_ctl_fs_type);
}
void fuse_ctl_cleanup(void)
{
unregister_filesystem(&fuse_ctl_fs_type);
}
| gpl-2.0 |
mthous72/a13_kernel_3.0.8 | arch/mips/mm/uasm.c | 7553 | 18097 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* A small micro-assembler. It is intentionally kept simple, supports only
* a subset of instructions, and does not try to hide pipeline
* effects like branch delay slots.
*
* Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
* Copyright (C) 2005, 2007 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/bugs.h>
#include <asm/uasm.h>
enum fields {
RS = 0x001,
RT = 0x002,
RD = 0x004,
RE = 0x008,
SIMM = 0x010,
UIMM = 0x020,
BIMM = 0x040,
JIMM = 0x080,
FUNC = 0x100,
SET = 0x200,
SCIMM = 0x400
};
#define OP_MASK 0x3f
#define OP_SH 26
#define RS_MASK 0x1f
#define RS_SH 21
#define RT_MASK 0x1f
#define RT_SH 16
#define RD_MASK 0x1f
#define RD_SH 11
#define RE_MASK 0x1f
#define RE_SH 6
#define IMM_MASK 0xffff
#define IMM_SH 0
#define JIMM_MASK 0x3ffffff
#define JIMM_SH 0
#define FUNC_MASK 0x3f
#define FUNC_SH 0
#define SET_MASK 0x7
#define SET_SH 0
#define SCIMM_MASK 0xfffff
#define SCIMM_SH 6
enum opcode {
insn_invalid,
insn_addu, insn_addiu, insn_and, insn_andi, insn_beq,
insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
insn_bne, insn_cache, insn_daddu, insn_daddiu, insn_dmfc0,
insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
insn_dsrl32, insn_drotr, insn_drotr32, insn_dsubu, insn_eret,
insn_j, insn_jal, insn_jr, insn_ld, insn_ll, insn_lld,
insn_lui, insn_lw, insn_mfc0, insn_mtc0, insn_or, insn_ori,
insn_pref, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll,
insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw, insn_tlbp,
insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
insn_dins, insn_dinsm, insn_syscall, insn_bbit0, insn_bbit1,
insn_lwx, insn_ldx
};
struct insn {
enum opcode opcode;
u32 match;
enum fields fields;
};
/* This macro sets the non-variable bits of an instruction. */
#define M(a, b, c, d, e, f) \
((a) << OP_SH \
| (b) << RS_SH \
| (c) << RT_SH \
| (d) << RD_SH \
| (e) << RE_SH \
| (f) << FUNC_SH)
static struct insn insn_table[] __uasminitdata = {
{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
{ insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
{ insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
{ insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
{ insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
{ insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
{ insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
{ insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
{ insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
{ insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
{ insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
{ insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
{ insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
{ insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
{ insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
{ insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
{ insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
{ insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
{ insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
{ insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
{ insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
{ insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
{ insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
{ insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
{ insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
{ insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
{ insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
{ insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
{ insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
{ insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
{ insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
{ insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
{ insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
{ insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
{ insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
{ insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
{ insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
{ insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
{ insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
{ insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
{ insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
{ insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
{ insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
{ insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
{ insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
{ insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
{ insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
{ insn_invalid, 0, 0 }
};
#undef M
static inline __uasminit u32 build_rs(u32 arg)
{
WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RS_MASK) << RS_SH;
}
static inline __uasminit u32 build_rt(u32 arg)
{
WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RT_MASK) << RT_SH;
}
static inline __uasminit u32 build_rd(u32 arg)
{
WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RD_MASK) << RD_SH;
}
static inline __uasminit u32 build_re(u32 arg)
{
WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RE_MASK) << RE_SH;
}
static inline __uasminit u32 build_simm(s32 arg)
{
WARN(arg > 0x7fff || arg < -0x8000,
KERN_WARNING "Micro-assembler field overflow\n");
return arg & 0xffff;
}
static inline __uasminit u32 build_uimm(u32 arg)
{
WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return arg & IMM_MASK;
}
static inline __uasminit u32 build_bimm(s32 arg)
{
WARN(arg > 0x1ffff || arg < -0x20000,
KERN_WARNING "Micro-assembler field overflow\n");
WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
}
static inline __uasminit u32 build_jimm(u32 arg)
{
WARN(arg & ~(JIMM_MASK << 2),
KERN_WARNING "Micro-assembler field overflow\n");
return (arg >> 2) & JIMM_MASK;
}
static inline __uasminit u32 build_scimm(u32 arg)
{
WARN(arg & ~SCIMM_MASK,
KERN_WARNING "Micro-assembler field overflow\n");
return (arg & SCIMM_MASK) << SCIMM_SH;
}
static inline __uasminit u32 build_func(u32 arg)
{
WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return arg & FUNC_MASK;
}
static inline __uasminit u32 build_set(u32 arg)
{
WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return arg & SET_MASK;
}
/*
* The order of opcode arguments is implicitly left to right,
* starting with RS and ending with FUNC or IMM.
*/
static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
{
struct insn *ip = NULL;
unsigned int i;
va_list ap;
u32 op;
for (i = 0; insn_table[i].opcode != insn_invalid; i++)
if (insn_table[i].opcode == opc) {
ip = &insn_table[i];
break;
}
if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
panic("Unsupported Micro-assembler instruction %d", opc);
op = ip->match;
va_start(ap, opc);
if (ip->fields & RS)
op |= build_rs(va_arg(ap, u32));
if (ip->fields & RT)
op |= build_rt(va_arg(ap, u32));
if (ip->fields & RD)
op |= build_rd(va_arg(ap, u32));
if (ip->fields & RE)
op |= build_re(va_arg(ap, u32));
if (ip->fields & SIMM)
op |= build_simm(va_arg(ap, s32));
if (ip->fields & UIMM)
op |= build_uimm(va_arg(ap, u32));
if (ip->fields & BIMM)
op |= build_bimm(va_arg(ap, s32));
if (ip->fields & JIMM)
op |= build_jimm(va_arg(ap, u32));
if (ip->fields & FUNC)
op |= build_func(va_arg(ap, u32));
if (ip->fields & SET)
op |= build_set(va_arg(ap, u32));
if (ip->fields & SCIMM)
op |= build_scimm(va_arg(ap, u32));
va_end(ap);
**buf = op;
(*buf)++;
}
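/*
* Worked encoding example (not part of the original file): for
* uasm_i_addiu(&p, 2, 3, 4) the I_u2u1s3 wrapper below expands to
* build_insn(buf, insn_addiu, 3, 2, 4); the insn_addiu table entry has
* fields RS | RT | SIMM, consumed left to right, so:
*
*	op  = M(addiu_op, 0, 0, 0, 0, 0);	(9 << 26 = 0x24000000)
*	op |= build_rs(3);			(3 << 21 = 0x00600000)
*	op |= build_rt(2);			(2 << 16 = 0x00020000)
*	op |= build_simm(4);			(          0x00000004)
*	**buf = 0x24620004;			(addiu $v0, $v1, 4)
*/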
#define I_u1u2u3(op) \
Ip_u1u2u3(op) \
{ \
build_insn(buf, insn##op, a, b, c); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u2u1u3(op) \
Ip_u2u1u3(op) \
{ \
build_insn(buf, insn##op, b, a, c); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u3u1u2(op) \
Ip_u3u1u2(op) \
{ \
build_insn(buf, insn##op, b, c, a); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u1u2s3(op) \
Ip_u1u2s3(op) \
{ \
build_insn(buf, insn##op, a, b, c); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u2s3u1(op) \
Ip_u2s3u1(op) \
{ \
build_insn(buf, insn##op, c, a, b); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u2u1s3(op) \
Ip_u2u1s3(op) \
{ \
build_insn(buf, insn##op, b, a, c); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u2u1msbu3(op) \
Ip_u2u1msbu3(op) \
{ \
build_insn(buf, insn##op, b, a, c+d-1, c); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u2u1msb32u3(op) \
Ip_u2u1msbu3(op) \
{ \
build_insn(buf, insn##op, b, a, c+d-33, c); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u1u2(op) \
Ip_u1u2(op) \
{ \
build_insn(buf, insn##op, a, b); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u1s2(op) \
Ip_u1s2(op) \
{ \
build_insn(buf, insn##op, a, b); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u1(op) \
Ip_u1(op) \
{ \
build_insn(buf, insn##op, a); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_0(op) \
Ip_0(op) \
{ \
build_insn(buf, insn##op); \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
I_u2u1s3(_addiu)
I_u3u1u2(_addu)
I_u2u1u3(_andi)
I_u3u1u2(_and)
I_u1u2s3(_beq)
I_u1u2s3(_beql)
I_u1s2(_bgez)
I_u1s2(_bgezl)
I_u1s2(_bltz)
I_u1s2(_bltzl)
I_u1u2s3(_bne)
I_u2s3u1(_cache)
I_u1u2u3(_dmfc0)
I_u1u2u3(_dmtc0)
I_u2u1s3(_daddiu)
I_u3u1u2(_daddu)
I_u2u1u3(_dsll)
I_u2u1u3(_dsll32)
I_u2u1u3(_dsra)
I_u2u1u3(_dsrl)
I_u2u1u3(_dsrl32)
I_u2u1u3(_drotr)
I_u2u1u3(_drotr32)
I_u3u1u2(_dsubu)
I_0(_eret)
I_u1(_j)
I_u1(_jal)
I_u1(_jr)
I_u2s3u1(_ld)
I_u2s3u1(_ll)
I_u2s3u1(_lld)
I_u1s2(_lui)
I_u2s3u1(_lw)
I_u1u2u3(_mfc0)
I_u1u2u3(_mtc0)
I_u2u1u3(_ori)
I_u3u1u2(_or)
I_0(_rfe)
I_u2s3u1(_sc)
I_u2s3u1(_scd)
I_u2s3u1(_sd)
I_u2u1u3(_sll)
I_u2u1u3(_sra)
I_u2u1u3(_srl)
I_u2u1u3(_rotr)
I_u3u1u2(_subu)
I_u2s3u1(_sw)
I_0(_tlbp)
I_0(_tlbr)
I_0(_tlbwi)
I_0(_tlbwr)
I_u3u1u2(_xor)
I_u2u1u3(_xori)
I_u2u1msbu3(_dins);
I_u2u1msb32u3(_dinsm);
I_u1(_syscall);
I_u1u2s3(_bbit0);
I_u1u2s3(_bbit1);
I_u3u1u2(_lwx)
I_u3u1u2(_ldx)
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#include <asm/octeon/octeon.h>
void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
unsigned int c)
{
if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
/*
* As per erratum Core-14449, replace prefetches 0-4,
* 6-24 with 'pref 28'.
*/
build_insn(buf, insn_pref, c, 28, b);
else
build_insn(buf, insn_pref, c, a, b);
}
UASM_EXPORT_SYMBOL(uasm_i_pref);
#else
I_u2s3u1(_pref)
#endif
/* Handle labels. */
void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
{
(*lab)->addr = addr;
(*lab)->lab = lid;
(*lab)++;
}
UASM_EXPORT_SYMBOL(uasm_build_label);
int __uasminit uasm_in_compat_space_p(long addr)
{
/* Is this address in 32bit compat space? */
#ifdef CONFIG_64BIT
return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L);
#else
return 1;
#endif
}
UASM_EXPORT_SYMBOL(uasm_in_compat_space_p);
static int __uasminit uasm_rel_highest(long val)
{
#ifdef CONFIG_64BIT
return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
#else
return 0;
#endif
}
static int __uasminit uasm_rel_higher(long val)
{
#ifdef CONFIG_64BIT
return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
#else
return 0;
#endif
}
int __uasminit uasm_rel_hi(long val)
{
return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}
UASM_EXPORT_SYMBOL(uasm_rel_hi);
int __uasminit uasm_rel_lo(long val)
{
return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
UASM_EXPORT_SYMBOL(uasm_rel_lo);
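/*
* Worked example of the hi/lo split above: the "^ 0x8000) - 0x8000" idiom
* sign-extends each 16-bit half so that a lui/addiu pair reassembles the
* original value even though addiu sign-extends its immediate. For
* addr = 0x12348000:
*
*	uasm_rel_hi(addr) = 0x1235	(rounded up by the carry)
*	uasm_rel_lo(addr) = -0x8000
*	(0x1235 << 16) + (-0x8000) = 0x12348000
*/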
void __uasminit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
if (!uasm_in_compat_space_p(addr)) {
uasm_i_lui(buf, rs, uasm_rel_highest(addr));
if (uasm_rel_higher(addr))
uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr));
if (uasm_rel_hi(addr)) {
uasm_i_dsll(buf, rs, rs, 16);
uasm_i_daddiu(buf, rs, rs, uasm_rel_hi(addr));
uasm_i_dsll(buf, rs, rs, 16);
} else
uasm_i_dsll32(buf, rs, rs, 0);
} else
uasm_i_lui(buf, rs, uasm_rel_hi(addr));
}
UASM_EXPORT_SYMBOL(UASM_i_LA_mostly);
void __uasminit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
{
UASM_i_LA_mostly(buf, rs, addr);
if (uasm_rel_lo(addr)) {
if (!uasm_in_compat_space_p(addr))
uasm_i_daddiu(buf, rs, rs, uasm_rel_lo(addr));
else
uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr));
}
}
UASM_EXPORT_SYMBOL(UASM_i_LA);
/* Handle relocations. */
void __uasminit
uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
{
(*rel)->addr = addr;
(*rel)->type = R_MIPS_PC16;
(*rel)->lab = lid;
(*rel)++;
}
UASM_EXPORT_SYMBOL(uasm_r_mips_pc16);
static inline void __uasminit
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
long laddr = (long)lab->addr;
long raddr = (long)rel->addr;
switch (rel->type) {
case R_MIPS_PC16:
*rel->addr |= build_bimm(laddr - (raddr + 4));
break;
default:
panic("Unsupported Micro-assembler relocation %d",
rel->type);
}
}
void __uasminit
uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
struct uasm_label *l;
for (; rel->lab != UASM_LABEL_INVALID; rel++)
for (l = lab; l->lab != UASM_LABEL_INVALID; l++)
if (rel->lab == l->lab)
__resolve_relocs(rel, l);
}
UASM_EXPORT_SYMBOL(uasm_resolve_relocs);
void __uasminit
uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++)
if (rel->addr >= first && rel->addr < end)
rel->addr += off;
}
UASM_EXPORT_SYMBOL(uasm_move_relocs);
void __uasminit
uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
{
for (; lab->lab != UASM_LABEL_INVALID; lab++)
if (lab->addr >= first && lab->addr < end)
lab->addr += off;
}
UASM_EXPORT_SYMBOL(uasm_move_labels);
void __uasminit
uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
u32 *end, u32 *target)
{
long off = (long)(target - first);
memcpy(target, first, (end - first) * sizeof(u32));
uasm_move_relocs(rel, first, end, off);
uasm_move_labels(lab, first, end, off);
}
UASM_EXPORT_SYMBOL(uasm_copy_handler);
int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++) {
if (rel->addr == addr
&& (rel->type == R_MIPS_PC16
|| rel->type == R_MIPS_26))
return 1;
}
return 0;
}
UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay);
/* Convenience functions for labeled branches. */
void __uasminit
uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bltz(p, reg, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bltz);
void __uasminit
uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_b(p, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_b);
void __uasminit
uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_beqz(p, reg, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_beqz);
void __uasminit
uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_beqzl(p, reg, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_beqzl);
void __uasminit
uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
unsigned int reg2, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bne(p, reg1, reg2, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bne);
void __uasminit
uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bnez(p, reg, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bnez);
void __uasminit
uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bgezl(p, reg, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bgezl);
void __uasminit
uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bgez(p, reg, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bgez);
void __uasminit
uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
unsigned int bit, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bbit0(p, reg, bit, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bbit0);
void __uasminit
uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
unsigned int bit, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bbit1(p, reg, bit, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bbit1);
| gpl-2.0 |
hellsgod/hells-Core-N4 | drivers/firmware/memmap.c | 9089 | 7301 | /*
* linux/drivers/firmware/memmap.c
* Copyright (C) 2008 SUSE LINUX Products GmbH
* by Bernhard Walle <bernhard.walle@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License v2.0 as published by
* the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/string.h>
#include <linux/firmware-map.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
/*
* Data types ------------------------------------------------------------------
*/
/*
* Firmware map entry. Because firmware memory maps are flat and not
* hierarchical, it's ok to organise them in a linked list. No parent
* information is necessary as for the resource tree.
*/
struct firmware_map_entry {
/*
* start and end must be u64 rather than resource_size_t, because e820
* resources can lie at addresses above 4G.
*/
u64 start; /* start of the memory range */
u64 end; /* end of the memory range (incl.) */
const char *type; /* type of the memory range */
struct list_head list; /* entry for the linked list */
struct kobject kobj; /* kobject for each entry */
};
/*
* Forward declarations --------------------------------------------------------
*/
static ssize_t memmap_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf);
static ssize_t start_show(struct firmware_map_entry *entry, char *buf);
static ssize_t end_show(struct firmware_map_entry *entry, char *buf);
static ssize_t type_show(struct firmware_map_entry *entry, char *buf);
/*
* Static data -----------------------------------------------------------------
*/
struct memmap_attribute {
struct attribute attr;
ssize_t (*show)(struct firmware_map_entry *entry, char *buf);
};
static struct memmap_attribute memmap_start_attr = __ATTR_RO(start);
static struct memmap_attribute memmap_end_attr = __ATTR_RO(end);
static struct memmap_attribute memmap_type_attr = __ATTR_RO(type);
/*
* These are default attributes that are added for every memmap entry.
*/
static struct attribute *def_attrs[] = {
&memmap_start_attr.attr,
&memmap_end_attr.attr,
&memmap_type_attr.attr,
NULL
};
static const struct sysfs_ops memmap_attr_ops = {
.show = memmap_attr_show,
};
static struct kobj_type memmap_ktype = {
.sysfs_ops = &memmap_attr_ops,
.default_attrs = def_attrs,
};
/*
* Registration functions ------------------------------------------------------
*/
/*
* Firmware memory map entries. No locking is needed because the
* firmware_map_add() and firmware_map_add_early() functions are called
* in firmware initialisation code in one single thread of execution.
*/
static LIST_HEAD(map_entries);
/**
* firmware_map_add_entry() - Does the real work to add a firmware memmap entry.
* @start: Start of the memory range.
* @end: End of the memory range (inclusive).
* @type: Type of the memory range.
* @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised
* entry.
*
* Common implementation of firmware_map_add() and firmware_map_add_early()
* which expects a pre-allocated struct firmware_map_entry.
**/
static int firmware_map_add_entry(u64 start, u64 end,
const char *type,
struct firmware_map_entry *entry)
{
BUG_ON(start > end);
entry->start = start;
entry->end = end;
entry->type = type;
INIT_LIST_HEAD(&entry->list);
kobject_init(&entry->kobj, &memmap_ktype);
list_add_tail(&entry->list, &map_entries);
return 0;
}
/*
* Add memmap entry on sysfs
*/
static int add_sysfs_fw_map_entry(struct firmware_map_entry *entry)
{
static int map_entries_nr;
static struct kset *mmap_kset;
if (!mmap_kset) {
mmap_kset = kset_create_and_add("memmap", NULL, firmware_kobj);
if (!mmap_kset)
return -ENOMEM;
}
entry->kobj.kset = mmap_kset;
if (kobject_add(&entry->kobj, NULL, "%d", map_entries_nr++))
kobject_put(&entry->kobj);
return 0;
}
/**
* firmware_map_add_hotplug() - Adds a firmware mapping entry when we do
* memory hotplug.
* @start: Start of the memory range.
* @end: End of the memory range (inclusive).
* @type: Type of the memory range.
*
* Adds a firmware mapping entry. This function is for memory hotplug; it
* is similar to firmware_map_add_early(). The only difference is that it
* creates the sysfs entry dynamically.
*
* Returns 0 on success, or -ENOMEM if no memory could be allocated.
**/
int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
firmware_map_add_entry(start, end, type, entry);
/* create the memmap entry */
add_sysfs_fw_map_entry(entry);
return 0;
}
/**
* firmware_map_add_early() - Adds a firmware mapping entry.
* @start: Start of the memory range.
* @end: End of the memory range (inclusive).
* @type: Type of the memory range.
*
* Adds a firmware mapping entry. This function uses the bootmem allocator
* for memory allocation.
*
* That function must be called before late_initcall.
*
* Returns 0 on success, or -ENOMEM if no memory could be allocated.
**/
int __init firmware_map_add_early(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
entry = alloc_bootmem(sizeof(struct firmware_map_entry));
if (WARN_ON(!entry))
return -ENOMEM;
return firmware_map_add_entry(start, end, type, entry);
}
/*
* Sysfs functions -------------------------------------------------------------
*/
static ssize_t start_show(struct firmware_map_entry *entry, char *buf)
{
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
(unsigned long long)entry->start);
}
static ssize_t end_show(struct firmware_map_entry *entry, char *buf)
{
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
(unsigned long long)entry->end);
}
static ssize_t type_show(struct firmware_map_entry *entry, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", entry->type);
}
#define to_memmap_attr(_attr) container_of(_attr, struct memmap_attribute, attr)
#define to_memmap_entry(obj) container_of(obj, struct firmware_map_entry, kobj)
static ssize_t memmap_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct firmware_map_entry *entry = to_memmap_entry(kobj);
struct memmap_attribute *memmap_attr = to_memmap_attr(attr);
return memmap_attr->show(entry, buf);
}
/*
* Adds the entries in the map_entries list to sysfs. It is important
* that firmware_map_add() and firmware_map_add_early() are called before
* late_initcall: memmap_init() itself runs as a late_initcall() function,
* so entries added after that point would not show up in sysfs.
*/
static int __init memmap_init(void)
{
struct firmware_map_entry *entry;
list_for_each_entry(entry, &map_entries, list)
add_sysfs_fw_map_entry(entry);
return 0;
}
late_initcall(memmap_init);
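/*
* Resulting sysfs layout, for illustration: each firmware_map_entry
* becomes a numbered directory under /sys/firmware/memmap with the three
* attributes defined above (the values shown are typical x86 e820 output
* and are illustrative only):
*
*	$ cat /sys/firmware/memmap/0/start
*	0x0
*	$ cat /sys/firmware/memmap/0/end
*	0x9fbff
*	$ cat /sys/firmware/memmap/0/type
*	System RAM
*/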
| gpl-2.0 |
coreentin/android_kernel_nvidia_s8515 | net/netfilter/xt_CHECKSUM.c | 13185 | 1796 | /* iptables module for the packet checksum mangling
*
* (C) 2002 by Harald Welte <laforge@netfilter.org>
* (C) 2010 Red Hat, Inc.
*
* Author: Michael S. Tsirkin <mst@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_CHECKSUM.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael S. Tsirkin <mst@redhat.com>");
MODULE_DESCRIPTION("Xtables: checksum modification");
MODULE_ALIAS("ipt_CHECKSUM");
MODULE_ALIAS("ip6t_CHECKSUM");
static unsigned int
checksum_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
if (skb->ip_summed == CHECKSUM_PARTIAL)
skb_checksum_help(skb);
return XT_CONTINUE;
}
static int checksum_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_CHECKSUM_info *einfo = par->targinfo;
if (einfo->operation & ~XT_CHECKSUM_OP_FILL) {
pr_info("unsupported CHECKSUM operation %x\n", einfo->operation);
return -EINVAL;
}
if (!einfo->operation) {
pr_info("no CHECKSUM operation enabled\n");
return -EINVAL;
}
return 0;
}
static struct xt_target checksum_tg_reg __read_mostly = {
.name = "CHECKSUM",
.family = NFPROTO_UNSPEC,
.target = checksum_tg,
.targetsize = sizeof(struct xt_CHECKSUM_info),
.table = "mangle",
.checkentry = checksum_tg_check,
.me = THIS_MODULE,
};
static int __init checksum_tg_init(void)
{
return xt_register_target(&checksum_tg_reg);
}
static void __exit checksum_tg_exit(void)
{
xt_unregister_target(&checksum_tg_reg);
}
module_init(checksum_tg_init);
module_exit(checksum_tg_exit);
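/*
* A usage sketch (illustrative): the classic use of this target is
* filling in checksums on DHCP replies handed to virtio guests that
* cannot cope with CHECKSUM_PARTIAL packets, e.g.:
*
*	iptables -t mangle -A POSTROUTING -p udp --dport 68 \
*		-j CHECKSUM --checksum-fill
*
* --checksum-fill sets XT_CHECKSUM_OP_FILL, the only operation accepted
* by checksum_tg_check() above.
*/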
| gpl-2.0 |
ISTweak/android_kernel_sharp_msm8960 | fs/isofs/joliet.c | 14465 | 1357 | /*
* linux/fs/isofs/joliet.c
*
* (C) 1996 Gordon Chaffee
*
* Joliet: Microsoft's Unicode extensions to iso9660
*/
#include <linux/types.h>
#include <linux/nls.h>
#include "isofs.h"
/*
* Convert Unicode 16 to UTF-8 or ASCII.
*/
static int
uni16_to_x8(unsigned char *ascii, __be16 *uni, int len, struct nls_table *nls)
{
__be16 *ip, ch;
unsigned char *op;
ip = uni;
op = ascii;
while ((ch = get_unaligned(ip)) && len) {
int llen;
llen = nls->uni2char(be16_to_cpu(ch), op, NLS_MAX_CHARSET_SIZE);
if (llen > 0)
op += llen;
else
*op++ = '?';
ip++;
len--;
}
*op = 0;
return (op - ascii);
}
int
get_joliet_filename(struct iso_directory_record * de, unsigned char *outname, struct inode * inode)
{
unsigned char utf8;
struct nls_table *nls;
unsigned char len = 0;
utf8 = ISOFS_SB(inode->i_sb)->s_utf8;
nls = ISOFS_SB(inode->i_sb)->s_nls_iocharset;
if (utf8) {
len = utf16s_to_utf8s((const wchar_t *) de->name,
de->name_len[0] >> 1, UTF16_BIG_ENDIAN,
outname, PAGE_SIZE);
} else {
len = uni16_to_x8(outname, (__be16 *) de->name,
de->name_len[0] >> 1, nls);
}
if ((len > 2) && (outname[len-2] == ';') && (outname[len-1] == '1'))
len -= 2;
/*
* Windows doesn't like periods at the end of a name,
* so neither do we
*/
while (len >= 2 && (outname[len-1] == '.'))
len--;
return len;
}
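/*
* Worked example of the trimming above: a Joliet record for "README.TXT"
* is typically stored as the UCS-2 string "README.TXT;1"; after
* conversion the ";1" version suffix is dropped (len -= 2) and trailing
* periods, if any, are trimmed, leaving the 10-character "README.TXT".
*/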
| gpl-2.0 |
SeniorLimpio/ldroid_kernel | drivers/scsi/pcmcia/nsp_message.c | 15489 | 2158 | /*==========================================================================
NinjaSCSI-3 message handler
By: YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>
This software may be used and distributed according to the terms of
the GNU General Public License.
*/
/* $Id: nsp_message.c,v 1.6 2003/07/26 14:21:09 elca Exp $ */
static void nsp_message_in(struct scsi_cmnd *SCpnt)
{
unsigned int base = SCpnt->device->host->io_port;
nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
unsigned char data_reg, control_reg;
int ret, len;
/*
* XXX: NSP QUIRK
* The NSP raises interrupts only on SCSI phase changes, therefore we
* must poll the SCSI phase here to catch the next "msg in" if one
* exists (i.e. when no phase change occurs).
*/
ret = 16;
len = 0;
nsp_dbg(NSP_DEBUG_MSGINOCCUR, "msgin loop");
do {
/* read data */
data_reg = nsp_index_read(base, SCSIDATAIN);
/* assert ACK */
control_reg = nsp_index_read(base, SCSIBUSCTRL);
control_reg |= SCSI_ACK;
nsp_index_write(base, SCSIBUSCTRL, control_reg);
nsp_negate_signal(SCpnt, BUSMON_REQ, "msgin<REQ>");
data->MsgBuffer[len] = data_reg; len++;
/* deassert ACK */
control_reg = nsp_index_read(base, SCSIBUSCTRL);
control_reg &= ~SCSI_ACK;
nsp_index_write(base, SCSIBUSCTRL, control_reg);
/* catch a next signal */
ret = nsp_expect_signal(SCpnt, BUSPHASE_MESSAGE_IN, BUSMON_REQ);
} while (ret > 0 && MSGBUF_SIZE > len);
data->MsgLen = len;
}
static void nsp_message_out(struct scsi_cmnd *SCpnt)
{
nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
int ret = 1;
int len = data->MsgLen;
/*
* XXX: NSP QUIRK
* The NSP raises interrupts only on SCSI phase changes, therefore we
* must poll the SCSI phase here to catch the next "msg out" if one
* exists (i.e. when no phase change occurs).
*/
nsp_dbg(NSP_DEBUG_MSGOUTOCCUR, "msgout loop");
do {
if (nsp_xfer(SCpnt, BUSPHASE_MESSAGE_OUT)) {
nsp_msg(KERN_DEBUG, "msgout: xfer short");
}
/* catch a next signal */
ret = nsp_expect_signal(SCpnt, BUSPHASE_MESSAGE_OUT, BUSMON_REQ);
} while (ret > 0 && len-- > 0);
}
/* end */
| gpl-2.0 |
drowningchild/dc-geebus | arch/arm/mach-omap2/omap-smp.c | 130 | 4682 | /*
* OMAP4 SMP source file. It contains platform-specific functions
* needed for the linux smp kernel.
*
* Copyright (C) 2009 Texas Instruments, Inc.
*
* Author:
* Santosh Shilimkar <santosh.shilimkar@ti.com>
*
* Platform file needed for the OMAP4 SMP. This file is based on the ARM
* RealView SMP platform.
* Copyright (c) 2002 ARM Limited.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/hardware/gic.h>
#include <asm/smp_scu.h>
#include <mach/hardware.h>
#include <mach/omap-secure.h>
#include "iomap.h"
#include "common.h"
#include "clockdomain.h"
/* SCU base address */
static void __iomem *scu_base;
static DEFINE_SPINLOCK(boot_lock);
void __iomem *omap4_get_scu_base(void)
{
return scu_base;
}
void platform_secondary_init(unsigned int cpu)
{
/*
* Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
* OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
* init, and for CPU1 a secure PPA API is provided. CPU0 must be ON
* while executing the NS_SMP API on CPU1, and the PPA version must be 1.4.0+.
* OMAP443X GP devices - SMP bit isn't accessible.
* OMAP446X GP devices - SMP bit access is enabled on both CPUs.
*/
if (cpu_is_omap443x() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX,
4, 0, 0, 0, 0, 0);
/*
* If any interrupts are already enabled for the primary
* core (e.g. timer irq), then they will not have been enabled
* for us: do so
*/
gic_secondary_init(0);
/*
* Synchronise with the boot thread.
*/
spin_lock(&boot_lock);
spin_unlock(&boot_lock);
}
int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
static struct clockdomain *cpu1_clkdm;
static bool booted;
/*
* Set synchronisation state between this boot processor
* and the secondary one
*/
spin_lock(&boot_lock);
/*
* Update the AuxCoreBoot0 with boot state for secondary core.
* omap_secondary_startup() routine will hold the secondary core till
* the AuxCoreBoot1 register is updated with cpu state
* A barrier is added to ensure that write buffer is drained
*/
omap_modify_auxcoreboot0(0x200, 0xfffffdff);
flush_cache_all();
smp_wmb();
if (!cpu1_clkdm)
cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
/*
* The SGI(Software Generated Interrupts) are not wakeup capable
* from low power states. This is known limitation on OMAP4 and
* needs to be worked around by using software forced clockdomain
* wake-up. To wakeup CPU1, CPU0 forces the CPU1 clockdomain to
* software force wakeup. The clockdomain is then put back to
* hardware supervised mode.
* More details can be found in OMAP4430 TRM - Version J
* Section :
* 4.3.4.2 Power States of CPU0 and CPU1
*/
if (booted) {
clkdm_wakeup(cpu1_clkdm);
clkdm_allow_idle(cpu1_clkdm);
} else {
dsb_sev();
booted = true;
}
gic_raise_softirq(cpumask_of(cpu), 1);
/*
* Now the secondary core is starting up; let it run its
* calibrations, then wait for it to finish.
*/
spin_unlock(&boot_lock);
return 0;
}
static void __init wakeup_secondary(void)
{
/*
* Write the address of secondary startup routine into the
* AuxCoreBoot1 where ROM code will jump and start executing
* on secondary core once out of WFE
* A barrier is added to ensure that write buffer is drained
*/
omap_auxcoreboot_addr(virt_to_phys(omap_secondary_startup));
smp_wmb();
/*
* Send a 'sev' to wake the secondary core from WFE.
* Drain the outstanding writes to memory
*/
dsb_sev();
mb();
}
/*
* Initialise the CPU possible map early - this describes the CPUs
* which may be present or become present in the system.
*/
void __init smp_init_cpus(void)
{
unsigned int i, ncores;
/*
* Currently we can't call ioremap here because
* SoC detection won't work until after init_early.
*/
scu_base = OMAP2_L4_IO_ADDRESS(OMAP44XX_SCU_BASE);
BUG_ON(!scu_base);
ncores = scu_get_core_count(scu_base);
/* sanity check */
if (ncores > nr_cpu_ids) {
pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
ncores, nr_cpu_ids);
ncores = nr_cpu_ids;
}
for (i = 0; i < ncores; i++)
set_cpu_possible(i, true);
set_smp_cross_call(gic_raise_softirq);
}
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
/*
* Initialise the SCU and wake up the secondary core using
* wakeup_secondary().
*/
scu_enable(scu_base);
wakeup_secondary();
}
| gpl-2.0 |
maximus64/bbb-usbsniffer-kernel | net/ipv4/igmp.c | 130 | 67362 | /*
* Linux NET3: Internet Group Management Protocol [IGMP]
*
* This code implements the IGMP protocol as defined in RFC 1112. There has
* been a further revision of this protocol since, which is now also supported.
*
* If you have trouble with this module be careful what gcc you have used,
* the older version didn't come out right using gcc 2.5.8, the newer one
* seems to fall out with gcc 2.6.2.
*
* Authors:
* Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Fixes:
*
* Alan Cox : Added lots of __inline__ to optimise
* the memory usage of all the tiny little
* functions.
* Alan Cox : Dumped the header building experiment.
* Alan Cox : Minor tweaks ready for multicast routing
* and extended IGMP protocol.
* Alan Cox : Removed a load of inline directives. Gcc 2.5.8
* writes utterly bogus code otherwise (sigh)
* fixed IGMP loopback to behave in the manner
* desired by mrouted, fixed the fact it has been
* broken since 1.3.6 and cleaned up a few minor
* points.
*
* Chih-Jen Chang : Tried to revise IGMP to Version 2
* Tsu-Sheng Tsao E-mail: chihjenc@scf.usc.edu and tsusheng@scf.usc.edu
* The enhancements are mainly based on Steve Deering's
* ipmulti-3.5 source code.
* Chih-Jen Chang : Added the igmp_get_mrouter_info and
* Tsu-Sheng Tsao igmp_set_mrouter_info to keep track of
* the mrouted version on that device.
* Chih-Jen Chang : Added the max_resp_time parameter to
* Tsu-Sheng Tsao igmp_heard_query(). Using this parameter
* to identify the multicast router version
* and do what the IGMP version 2 specified.
* Chih-Jen Chang : Added a timer to revert to IGMP V2 router
* Tsu-Sheng Tsao if the specified time expired.
* Alan Cox : Stop IGMP from 0.0.0.0 being accepted.
* Alan Cox : Use GFP_ATOMIC in the right places.
* Christian Daudt : igmp timer wasn't set for local group
* memberships but was being deleted,
* which caused a "del_timer() called
* from %p with timer not initialized\n"
* message (960131).
* Christian Daudt : removed del_timer from
* igmp_timer_expire function (960205).
* Christian Daudt : igmp_heard_report now only calls
* igmp_timer_expire if tm->running is
* true (960216).
* Malcolm Beattie : ttl comparison wrong in igmp_rcv made
* igmp_heard_query never trigger. Expiry
* miscalculation fixed in igmp_heard_query
* and random() made to return unsigned to
* prevent negative expiry times.
* Alexey Kuznetsov: Wrong group leaving behaviour, backport
* fix from pending 2.1.x patches.
* Alan Cox: Forget to enable FDDI support earlier.
* Alexey Kuznetsov: Fixed leaving groups on device down.
* Alexey Kuznetsov: Accordance to igmp-v2-06 draft.
* David L Stevens: IGMPv3 support, with help from
* Vinay Kulkarni
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/times.h>
#include <linux/pkt_sched.h>
#include <net/net_namespace.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <linux/netfilter_ipv4.h>
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#endif
#define IP_MAX_MEMBERSHIPS 20
#define IP_MAX_MSF 10
#ifdef CONFIG_IP_MULTICAST
/* Parameter names and values are taken from igmp-v2-06 draft */
#define IGMP_V1_Router_Present_Timeout (400*HZ)
#define IGMP_V2_Router_Present_Timeout (400*HZ)
#define IGMP_V2_Unsolicited_Report_Interval (10*HZ)
#define IGMP_V3_Unsolicited_Report_Interval (1*HZ)
#define IGMP_Query_Response_Interval (10*HZ)
#define IGMP_Unsolicited_Report_Count 2
#define IGMP_Initial_Report_Delay (1)
/* IGMP_Initial_Report_Delay is not from IGMP specs!
* The IGMP specs require reporting membership immediately after
* joining a group, but we delay the first report by a
* small interval. It seems more natural, and it still does not
* contradict the specs provided this delay is small enough.
*/
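/*
 * IGMP_V1_SEEN()/IGMP_V2_SEEN() are true when we must behave as a v1/v2
 * host: either the force_igmp_version sysctl demands it (globally or per
 * device), or a v1/v2 query was heard recently; mr_v1_seen and mr_v2_seen
 * hold the jiffies value at which that memory expires.
 */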
#define IGMP_V1_SEEN(in_dev) \
(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \
IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1 || \
((in_dev)->mr_v1_seen && \
time_before(jiffies, (in_dev)->mr_v1_seen)))
#define IGMP_V2_SEEN(in_dev) \
(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \
IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2 || \
((in_dev)->mr_v2_seen && \
time_before(jiffies, (in_dev)->mr_v2_seen)))
static int unsolicited_report_interval(struct in_device *in_dev)
{
int interval_ms, interval_jiffies;
if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
interval_ms = IN_DEV_CONF_GET(
in_dev,
IGMPV2_UNSOLICITED_REPORT_INTERVAL);
else /* v3 */
interval_ms = IN_DEV_CONF_GET(
in_dev,
IGMPV3_UNSOLICITED_REPORT_INTERVAL);
interval_jiffies = msecs_to_jiffies(interval_ms);
/* _timer functions can't handle a delay of 0 jiffies so ensure
* we always return a positive value.
*/
if (interval_jiffies <= 0)
interval_jiffies = 1;
return interval_jiffies;
}
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr);
static void igmpv3_clear_delrec(struct in_device *in_dev);
static int sf_setstate(struct ip_mc_list *pmc);
static void sf_markstate(struct ip_mc_list *pmc);
#endif
static void ip_mc_clear_src(struct ip_mc_list *pmc);
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
int sfcount, __be32 *psfsrc, int delta);
static void ip_ma_put(struct ip_mc_list *im)
{
if (atomic_dec_and_test(&im->refcnt)) {
in_dev_put(im->interface);
kfree_rcu(im, rcu);
}
}
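/*
 * Iterators over the per-interface multicast list. The _rcu variant
 * expects rcu_read_lock() to be held, the _rtnl variant the RTNL.
 * A minimal lookup sketch:
 *
 *	rcu_read_lock();
 *	for_each_pmc_rcu(in_dev, pmc)
 *		if (pmc->multiaddr == group)
 *			break;
 *	rcu_read_unlock();
 */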
#define for_each_pmc_rcu(in_dev, pmc) \
for (pmc = rcu_dereference(in_dev->mc_list); \
pmc != NULL; \
pmc = rcu_dereference(pmc->next_rcu))
#define for_each_pmc_rtnl(in_dev, pmc) \
for (pmc = rtnl_dereference(in_dev->mc_list); \
pmc != NULL; \
pmc = rtnl_dereference(pmc->next_rcu))
#ifdef CONFIG_IP_MULTICAST
/*
* Timer management
*/
static void igmp_stop_timer(struct ip_mc_list *im)
{
spin_lock_bh(&im->lock);
if (del_timer(&im->timer))
atomic_dec(&im->refcnt);
im->tm_running = 0;
im->reporter = 0;
im->unsolicit_count = 0;
spin_unlock_bh(&im->lock);
}
/* Must be called with im->lock held. Arming an inactive timer takes a
 * reference on 'im'; it is dropped by igmp_stop_timer() or at expiry
 * (ip_ma_put() in igmp_timer_expire()).
 */
static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
{
int tv = prandom_u32() % max_delay;
im->tm_running = 1;
if (!mod_timer(&im->timer, jiffies+tv+2))
atomic_inc(&im->refcnt);
}
static void igmp_gq_start_timer(struct in_device *in_dev)
{
int tv = prandom_u32() % in_dev->mr_maxdelay;
in_dev->mr_gq_running = 1;
if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2))
in_dev_hold(in_dev);
}
static void igmp_ifc_start_timer(struct in_device *in_dev, int delay)
{
int tv = prandom_u32() % delay;
if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2))
in_dev_hold(in_dev);
}
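/* Both in_dev timer helpers above take a reference on in_dev when they
 * arm an inactive timer; the expiry handlers drop it with in_dev_put().
 */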
static void igmp_mod_timer(struct ip_mc_list *im, int max_delay)
{
spin_lock_bh(&im->lock);
im->unsolicit_count = 0;
if (del_timer(&im->timer)) {
if ((long)(im->timer.expires-jiffies) < max_delay) {
add_timer(&im->timer);
im->tm_running = 1;
spin_unlock_bh(&im->lock);
return;
}
atomic_dec(&im->refcnt);
}
igmp_start_timer(im, max_delay);
spin_unlock_bh(&im->lock);
}
/*
* Send an IGMP report.
*/
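/* The extra 4 bytes are the Router Alert IP option that
 * igmp_send_report() appends to the IP header.
 */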
#define IGMP_SIZE (sizeof(struct igmphdr)+sizeof(struct iphdr)+4)
static int is_in(struct ip_mc_list *pmc, struct ip_sf_list *psf, int type,
int gdeleted, int sdeleted)
{
switch (type) {
case IGMPV3_MODE_IS_INCLUDE:
case IGMPV3_MODE_IS_EXCLUDE:
if (gdeleted || sdeleted)
return 0;
if (!(pmc->gsquery && !psf->sf_gsresp)) {
if (pmc->sfmode == MCAST_INCLUDE)
return 1;
/* don't include if this source is excluded
* in all filters
*/
if (psf->sf_count[MCAST_INCLUDE])
return type == IGMPV3_MODE_IS_INCLUDE;
return pmc->sfcount[MCAST_EXCLUDE] ==
psf->sf_count[MCAST_EXCLUDE];
}
return 0;
case IGMPV3_CHANGE_TO_INCLUDE:
if (gdeleted || sdeleted)
return 0;
return psf->sf_count[MCAST_INCLUDE] != 0;
case IGMPV3_CHANGE_TO_EXCLUDE:
if (gdeleted || sdeleted)
return 0;
if (pmc->sfcount[MCAST_EXCLUDE] == 0 ||
psf->sf_count[MCAST_INCLUDE])
return 0;
return pmc->sfcount[MCAST_EXCLUDE] ==
psf->sf_count[MCAST_EXCLUDE];
case IGMPV3_ALLOW_NEW_SOURCES:
if (gdeleted || !psf->sf_crcount)
return 0;
return (pmc->sfmode == MCAST_INCLUDE) ^ sdeleted;
case IGMPV3_BLOCK_OLD_SOURCES:
if (pmc->sfmode == MCAST_INCLUDE)
return gdeleted || (psf->sf_crcount && sdeleted);
return psf->sf_crcount && !gdeleted && !sdeleted;
}
return 0;
}
static int
igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
{
struct ip_sf_list *psf;
int scount = 0;
for (psf = pmc->sources; psf; psf = psf->sf_next) {
if (!is_in(pmc, psf, type, gdeleted, sdeleted))
continue;
scount++;
}
return scount;
}
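/*
 * The size that igmpv3_newpack() managed to allocate is stashed in
 * skb->cb so that AVAILABLE() below can tell how much room is left
 * in a packet we are still filling.
 */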
#define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb))
static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
{
struct sk_buff *skb;
struct rtable *rt;
struct iphdr *pip;
struct igmpv3_report *pig;
struct net *net = dev_net(dev);
struct flowi4 fl4;
int hlen = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
while (1) {
skb = alloc_skb(size + hlen + tlen,
GFP_ATOMIC | __GFP_NOWARN);
if (skb)
break;
size >>= 1;
if (size < 256)
return NULL;
}
skb->priority = TC_PRIO_CONTROL;
igmp_skb_size(skb) = size;
rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
0, 0,
IPPROTO_IGMP, 0, dev->ifindex);
if (IS_ERR(rt)) {
kfree_skb(skb);
return NULL;
}
skb_dst_set(skb, &rt->dst);
skb->dev = dev;
skb_reserve(skb, hlen);
skb_reset_network_header(skb);
pip = ip_hdr(skb);
skb_put(skb, sizeof(struct iphdr) + 4);
pip->version = 4;
pip->ihl = (sizeof(struct iphdr)+4)>>2;
pip->tos = 0xc0;
pip->frag_off = htons(IP_DF);
pip->ttl = 1;
pip->daddr = fl4.daddr;
pip->saddr = fl4.saddr;
pip->protocol = IPPROTO_IGMP;
pip->tot_len = 0; /* filled in later */
ip_select_ident(skb, NULL);
((u8 *)&pip[1])[0] = IPOPT_RA;
((u8 *)&pip[1])[1] = 4;
((u8 *)&pip[1])[2] = 0;
((u8 *)&pip[1])[3] = 0;
skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4;
skb_put(skb, sizeof(*pig));
pig = igmpv3_report_hdr(skb);
pig->type = IGMPV3_HOST_MEMBERSHIP_REPORT;
pig->resv1 = 0;
pig->csum = 0;
pig->resv2 = 0;
pig->ngrec = 0;
return skb;
}
static int igmpv3_sendpack(struct sk_buff *skb)
{
struct igmphdr *pig = igmp_hdr(skb);
const int igmplen = skb_tail_pointer(skb) - skb_transport_header(skb);
pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen);
return ip_local_out(skb);
}
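/* Worst-case size of one group record: the fixed header plus four
 * bytes for every source that is_in() selects for this report type.
 */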
static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
{
return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc, type, gdel, sdel);
}
static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
int type, struct igmpv3_grec **ppgr)
{
struct net_device *dev = pmc->interface->dev;
struct igmpv3_report *pih;
struct igmpv3_grec *pgr;
if (!skb)
skb = igmpv3_newpack(dev, dev->mtu);
if (!skb)
return NULL;
pgr = (struct igmpv3_grec *)skb_put(skb, sizeof(struct igmpv3_grec));
pgr->grec_type = type;
pgr->grec_auxwords = 0;
pgr->grec_nsrcs = 0;
pgr->grec_mca = pmc->multiaddr;
pih = igmpv3_report_hdr(skb);
pih->ngrec = htons(ntohs(pih->ngrec)+1);
*ppgr = pgr;
return skb;
}
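/* Room left in skb: for packets we built ourselves (skb->dev set by
 * igmpv3_newpack()) use the allocation size remembered in skb->cb,
 * otherwise fall back to the real tailroom.
 */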
#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \
skb_tailroom(skb)) : 0)
static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
int type, int gdeleted, int sdeleted)
{
struct net_device *dev = pmc->interface->dev;
struct igmpv3_report *pih;
struct igmpv3_grec *pgr = NULL;
struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
int scount, stotal, first, isquery, truncate;
if (pmc->multiaddr == IGMP_ALL_HOSTS)
return skb;
isquery = type == IGMPV3_MODE_IS_INCLUDE ||
type == IGMPV3_MODE_IS_EXCLUDE;
truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
type == IGMPV3_CHANGE_TO_EXCLUDE;
stotal = scount = 0;
psf_list = sdeleted ? &pmc->tomb : &pmc->sources;
if (!*psf_list)
goto empty_source;
pih = skb ? igmpv3_report_hdr(skb) : NULL;
/* EX and TO_EX get a fresh packet, if needed */
if (truncate) {
if (pih && pih->ngrec &&
AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
if (skb)
igmpv3_sendpack(skb);
skb = igmpv3_newpack(dev, dev->mtu);
}
}
first = 1;
psf_prev = NULL;
for (psf = *psf_list; psf; psf = psf_next) {
__be32 *psrc;
psf_next = psf->sf_next;
if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
psf_prev = psf;
continue;
}
/* clear marks on query responses */
if (isquery)
psf->sf_gsresp = 0;
if (AVAILABLE(skb) < sizeof(__be32) +
first*sizeof(struct igmpv3_grec)) {
if (truncate && !first)
break; /* truncate these */
if (pgr)
pgr->grec_nsrcs = htons(scount);
if (skb)
igmpv3_sendpack(skb);
skb = igmpv3_newpack(dev, dev->mtu);
first = 1;
scount = 0;
}
if (first) {
skb = add_grhead(skb, pmc, type, &pgr);
first = 0;
}
if (!skb)
return NULL;
psrc = (__be32 *)skb_put(skb, sizeof(__be32));
*psrc = psf->sf_inaddr;
scount++;
stotal++;
if ((type == IGMPV3_ALLOW_NEW_SOURCES ||
type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
psf->sf_crcount--;
if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
if (psf_prev)
psf_prev->sf_next = psf->sf_next;
else
*psf_list = psf->sf_next;
kfree(psf);
continue;
}
}
psf_prev = psf;
}
empty_source:
if (!stotal) {
if (type == IGMPV3_ALLOW_NEW_SOURCES ||
type == IGMPV3_BLOCK_OLD_SOURCES)
return skb;
if (pmc->crcount || isquery) {
/* make sure we have room for group header */
if (skb && AVAILABLE(skb) < sizeof(struct igmpv3_grec)) {
igmpv3_sendpack(skb);
skb = NULL; /* add_grhead will get a new one */
}
skb = add_grhead(skb, pmc, type, &pgr);
}
}
if (pgr)
pgr->grec_nsrcs = htons(scount);
if (isquery)
pmc->gsquery = 0; /* clear query state on report */
return skb;
}
static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
{
struct sk_buff *skb = NULL;
int type;
if (!pmc) {
rcu_read_lock();
for_each_pmc_rcu(in_dev, pmc) {
if (pmc->multiaddr == IGMP_ALL_HOSTS)
continue;
spin_lock_bh(&pmc->lock);
if (pmc->sfcount[MCAST_EXCLUDE])
type = IGMPV3_MODE_IS_EXCLUDE;
else
type = IGMPV3_MODE_IS_INCLUDE;
skb = add_grec(skb, pmc, type, 0, 0);
spin_unlock_bh(&pmc->lock);
}
rcu_read_unlock();
} else {
spin_lock_bh(&pmc->lock);
if (pmc->sfcount[MCAST_EXCLUDE])
type = IGMPV3_MODE_IS_EXCLUDE;
else
type = IGMPV3_MODE_IS_INCLUDE;
skb = add_grec(skb, pmc, type, 0, 0);
spin_unlock_bh(&pmc->lock);
}
if (!skb)
return 0;
return igmpv3_sendpack(skb);
}
/*
* remove zero-count source records from a source filter list
*/
static void igmpv3_clear_zeros(struct ip_sf_list **ppsf)
{
struct ip_sf_list *psf_prev, *psf_next, *psf;
psf_prev = NULL;
for (psf = *ppsf; psf; psf = psf_next) {
psf_next = psf->sf_next;
if (psf->sf_crcount == 0) {
if (psf_prev)
psf_prev->sf_next = psf->sf_next;
else
*ppsf = psf->sf_next;
kfree(psf);
} else
psf_prev = psf;
}
}
static void igmpv3_send_cr(struct in_device *in_dev)
{
struct ip_mc_list *pmc, *pmc_prev, *pmc_next;
struct sk_buff *skb = NULL;
int type, dtype;
rcu_read_lock();
spin_lock_bh(&in_dev->mc_tomb_lock);
/* deleted MCA's */
pmc_prev = NULL;
for (pmc = in_dev->mc_tomb; pmc; pmc = pmc_next) {
pmc_next = pmc->next;
if (pmc->sfmode == MCAST_INCLUDE) {
type = IGMPV3_BLOCK_OLD_SOURCES;
dtype = IGMPV3_BLOCK_OLD_SOURCES;
skb = add_grec(skb, pmc, type, 1, 0);
skb = add_grec(skb, pmc, dtype, 1, 1);
}
if (pmc->crcount) {
if (pmc->sfmode == MCAST_EXCLUDE) {
type = IGMPV3_CHANGE_TO_INCLUDE;
skb = add_grec(skb, pmc, type, 1, 0);
}
pmc->crcount--;
if (pmc->crcount == 0) {
igmpv3_clear_zeros(&pmc->tomb);
igmpv3_clear_zeros(&pmc->sources);
}
}
if (pmc->crcount == 0 && !pmc->tomb && !pmc->sources) {
if (pmc_prev)
pmc_prev->next = pmc_next;
else
in_dev->mc_tomb = pmc_next;
in_dev_put(pmc->interface);
kfree(pmc);
} else
pmc_prev = pmc;
}
spin_unlock_bh(&in_dev->mc_tomb_lock);
/* change recs */
for_each_pmc_rcu(in_dev, pmc) {
spin_lock_bh(&pmc->lock);
if (pmc->sfcount[MCAST_EXCLUDE]) {
type = IGMPV3_BLOCK_OLD_SOURCES;
dtype = IGMPV3_ALLOW_NEW_SOURCES;
} else {
type = IGMPV3_ALLOW_NEW_SOURCES;
dtype = IGMPV3_BLOCK_OLD_SOURCES;
}
skb = add_grec(skb, pmc, type, 0, 0);
skb = add_grec(skb, pmc, dtype, 0, 1); /* deleted sources */
/* filter mode changes */
if (pmc->crcount) {
if (pmc->sfmode == MCAST_EXCLUDE)
type = IGMPV3_CHANGE_TO_EXCLUDE;
else
type = IGMPV3_CHANGE_TO_INCLUDE;
skb = add_grec(skb, pmc, type, 0, 0);
pmc->crcount--;
}
spin_unlock_bh(&pmc->lock);
}
rcu_read_unlock();
if (!skb)
return;
(void) igmpv3_sendpack(skb);
}
static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
int type)
{
struct sk_buff *skb;
struct iphdr *iph;
struct igmphdr *ih;
struct rtable *rt;
struct net_device *dev = in_dev->dev;
struct net *net = dev_net(dev);
__be32 group = pmc ? pmc->multiaddr : 0;
struct flowi4 fl4;
__be32 dst;
int hlen, tlen;
if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
return igmpv3_send_report(in_dev, pmc);
else if (type == IGMP_HOST_LEAVE_MESSAGE)
dst = IGMP_ALL_ROUTER;
else
dst = group;
rt = ip_route_output_ports(net, &fl4, NULL, dst, 0,
0, 0,
IPPROTO_IGMP, 0, dev->ifindex);
if (IS_ERR(rt))
return -1;
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
if (skb == NULL) {
ip_rt_put(rt);
return -1;
}
skb->priority = TC_PRIO_CONTROL;
skb_dst_set(skb, &rt->dst);
skb_reserve(skb, hlen);
skb_reset_network_header(skb);
iph = ip_hdr(skb);
skb_put(skb, sizeof(struct iphdr) + 4);
iph->version = 4;
iph->ihl = (sizeof(struct iphdr)+4)>>2;
iph->tos = 0xc0;
iph->frag_off = htons(IP_DF);
iph->ttl = 1;
iph->daddr = dst;
iph->saddr = fl4.saddr;
iph->protocol = IPPROTO_IGMP;
ip_select_ident(skb, NULL);
((u8 *)&iph[1])[0] = IPOPT_RA;
((u8 *)&iph[1])[1] = 4;
((u8 *)&iph[1])[2] = 0;
((u8 *)&iph[1])[3] = 0;
ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
ih->type = type;
ih->code = 0;
ih->csum = 0;
ih->group = group;
ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
return ip_local_out(skb);
}
static void igmp_gq_timer_expire(unsigned long data)
{
struct in_device *in_dev = (struct in_device *)data;
in_dev->mr_gq_running = 0;
igmpv3_send_report(in_dev, NULL);
in_dev_put(in_dev);
}
static void igmp_ifc_timer_expire(unsigned long data)
{
struct in_device *in_dev = (struct in_device *)data;
igmpv3_send_cr(in_dev);
if (in_dev->mr_ifc_count) {
in_dev->mr_ifc_count--;
igmp_ifc_start_timer(in_dev,
unsolicited_report_interval(in_dev));
}
in_dev_put(in_dev);
}
static void igmp_ifc_event(struct in_device *in_dev)
{
if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
return;
in_dev->mr_ifc_count = in_dev->mr_qrv ? in_dev->mr_qrv :
IGMP_Unsolicited_Report_Count;
igmp_ifc_start_timer(in_dev, 1);
}
static void igmp_timer_expire(unsigned long data)
{
struct ip_mc_list *im = (struct ip_mc_list *)data;
struct in_device *in_dev = im->interface;
spin_lock(&im->lock);
im->tm_running = 0;
if (im->unsolicit_count) {
im->unsolicit_count--;
igmp_start_timer(im, unsolicited_report_interval(in_dev));
}
im->reporter = 1;
spin_unlock(&im->lock);
if (IGMP_V1_SEEN(in_dev))
igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT);
else if (IGMP_V2_SEEN(in_dev))
igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT);
else
igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT);
ip_ma_put(im);
}
/* mark EXCLUDE-mode sources */
static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
struct ip_sf_list *psf;
int i, scount;
scount = 0;
for (psf = pmc->sources; psf; psf = psf->sf_next) {
if (scount == nsrcs)
break;
for (i = 0; i < nsrcs; i++) {
/* skip inactive filters */
if (psf->sf_count[MCAST_INCLUDE] ||
pmc->sfcount[MCAST_EXCLUDE] !=
psf->sf_count[MCAST_EXCLUDE])
break;
if (srcs[i] == psf->sf_inaddr) {
scount++;
break;
}
}
}
pmc->gsquery = 0;
if (scount == nsrcs) /* all sources excluded */
return 0;
return 1;
}
static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
struct ip_sf_list *psf;
int i, scount;
if (pmc->sfmode == MCAST_EXCLUDE)
return igmp_xmarksources(pmc, nsrcs, srcs);
/* mark INCLUDE-mode sources */
scount = 0;
for (psf = pmc->sources; psf; psf = psf->sf_next) {
if (scount == nsrcs)
break;
for (i = 0; i < nsrcs; i++)
if (srcs[i] == psf->sf_inaddr) {
psf->sf_gsresp = 1;
scount++;
break;
}
}
if (!scount) {
pmc->gsquery = 0;
return 0;
}
pmc->gsquery = 1;
return 1;
}
/* return true if packet was dropped */
static bool igmp_heard_report(struct in_device *in_dev, __be32 group)
{
struct ip_mc_list *im;
/* Timers are only set for non-local groups */
if (group == IGMP_ALL_HOSTS)
return false;
rcu_read_lock();
for_each_pmc_rcu(in_dev, im) {
if (im->multiaddr == group) {
igmp_stop_timer(im);
break;
}
}
rcu_read_unlock();
return false;
}
/* return true if packet was dropped */
static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
int len)
{
struct igmphdr *ih = igmp_hdr(skb);
struct igmpv3_query *ih3 = igmpv3_query_hdr(skb);
struct ip_mc_list *im;
__be32 group = ih->group;
int max_delay;
int mark = 0;
if (len == 8) {
if (ih->code == 0) {
/* Alas, an old v1 router is present here. */
max_delay = IGMP_Query_Response_Interval;
in_dev->mr_v1_seen = jiffies +
IGMP_V1_Router_Present_Timeout;
group = 0;
} else {
/* v2 router present */
max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
in_dev->mr_v2_seen = jiffies +
IGMP_V2_Router_Present_Timeout;
}
/* cancel the interface change timer */
in_dev->mr_ifc_count = 0;
if (del_timer(&in_dev->mr_ifc_timer))
__in_dev_put(in_dev);
/* clear deleted report items */
igmpv3_clear_delrec(in_dev);
} else if (len < 12) {
return true; /* ignore bogus packet; freed by caller */
} else if (IGMP_V1_SEEN(in_dev)) {
/* This is a v3 query with v1 queriers present */
max_delay = IGMP_Query_Response_Interval;
group = 0;
} else if (IGMP_V2_SEEN(in_dev)) {
/* this is a v3 query with v2 queriers present;
* Interpretation of the max_delay code is problematic here.
* A real v2 host would use ih_code directly, while v3 has a
* different encoding. We use the v3 encoding as more likely
* to be intended in a v3 query.
*/
max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
if (!max_delay)
max_delay = 1; /* can't mod w/ 0 */
} else { /* v3 */
if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
return true;
ih3 = igmpv3_query_hdr(skb);
if (ih3->nsrcs) {
if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
+ ntohs(ih3->nsrcs)*sizeof(__be32)))
return true;
ih3 = igmpv3_query_hdr(skb);
}
max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
if (!max_delay)
max_delay = 1; /* can't mod w/ 0 */
in_dev->mr_maxdelay = max_delay;
if (ih3->qrv)
in_dev->mr_qrv = ih3->qrv;
if (!group) { /* general query */
if (ih3->nsrcs)
return false; /* no sources allowed */
igmp_gq_start_timer(in_dev);
return false;
}
/* mark sources to include, if group & source-specific */
mark = ih3->nsrcs != 0;
}
/*
* - Start the timers in all of our membership records
* that the query applies to for the interface on
* which the query arrived excl. those that belong
* to a "local" group (224.0.0.X)
* - For timers already running check if they need to
* be reset.
* - Use the igmp->igmp_code field as the maximum
* delay possible
*/
rcu_read_lock();
for_each_pmc_rcu(in_dev, im) {
int changed;
if (group && group != im->multiaddr)
continue;
if (im->multiaddr == IGMP_ALL_HOSTS)
continue;
spin_lock_bh(&im->lock);
if (im->tm_running)
im->gsquery = im->gsquery && mark;
else
im->gsquery = mark;
changed = !im->gsquery ||
igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
spin_unlock_bh(&im->lock);
if (changed)
igmp_mod_timer(im, max_delay);
}
rcu_read_unlock();
return false;
}
/* called in rcu_read_lock() section */
int igmp_rcv(struct sk_buff *skb)
{
/* This basically follows the spec line by line -- see RFC1112 */
struct igmphdr *ih;
struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
int len = skb->len;
bool dropped = true;
if (in_dev == NULL)
goto drop;
if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
goto drop;
switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
if (!csum_fold(skb->csum))
break;
/* fall through */
case CHECKSUM_NONE:
skb->csum = 0;
if (__skb_checksum_complete(skb))
goto drop;
}
ih = igmp_hdr(skb);
switch (ih->type) {
case IGMP_HOST_MEMBERSHIP_QUERY:
dropped = igmp_heard_query(in_dev, skb, len);
break;
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMPV2_HOST_MEMBERSHIP_REPORT:
/* Is it our report looped back? */
if (rt_is_output_route(skb_rtable(skb)))
break;
/* don't rely on MC router hearing unicast reports */
if (skb->pkt_type == PACKET_MULTICAST ||
skb->pkt_type == PACKET_BROADCAST)
dropped = igmp_heard_report(in_dev, ih->group);
break;
case IGMP_PIM:
#ifdef CONFIG_IP_PIMSM_V1
return pim_rcv_v1(skb);
#endif
case IGMPV3_HOST_MEMBERSHIP_REPORT:
case IGMP_DVMRP:
case IGMP_TRACE:
case IGMP_HOST_LEAVE_MESSAGE:
case IGMP_MTRACE:
case IGMP_MTRACE_RESP:
break;
default:
break;
}
drop:
if (dropped)
kfree_skb(skb);
else
consume_skb(skb);
return 0;
}
#endif
/*
* Add a filter to a device
*/
static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
{
char buf[MAX_ADDR_LEN];
struct net_device *dev = in_dev->dev;
/* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG.
We will get multicast token leakage when IFF_MULTICAST
is changed. This check should be done in the ndo_set_rx_mode
routine. Something like:
if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; }
--ANK
*/
if (arp_mc_map(addr, buf, dev, 0) == 0)
dev_mc_add(dev, buf);
}
/*
* Remove a filter from a device
*/
static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
{
char buf[MAX_ADDR_LEN];
struct net_device *dev = in_dev->dev;
if (arp_mc_map(addr, buf, dev, 0) == 0)
dev_mc_del(dev, buf);
}
#ifdef CONFIG_IP_MULTICAST
/*
* deleted ip_mc_list manipulation
*/
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
{
struct ip_mc_list *pmc;
/* this is an "ip_mc_list" for convenience; only the fields below
* are actually used. In particular, the refcnt and users are not
* used for management of the delete list. Using the same structure
* for deleted items allows change reports to use common code with
* non-deleted or query-response MCA's.
*/
pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
if (!pmc)
return;
spin_lock_bh(&im->lock);
pmc->interface = im->interface;
in_dev_hold(in_dev);
pmc->multiaddr = im->multiaddr;
pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
IGMP_Unsolicited_Report_Count;
pmc->sfmode = im->sfmode;
if (pmc->sfmode == MCAST_INCLUDE) {
struct ip_sf_list *psf;
pmc->tomb = im->tomb;
pmc->sources = im->sources;
im->tomb = im->sources = NULL;
for (psf = pmc->sources; psf; psf = psf->sf_next)
psf->sf_crcount = pmc->crcount;
}
spin_unlock_bh(&im->lock);
spin_lock_bh(&in_dev->mc_tomb_lock);
pmc->next = in_dev->mc_tomb;
in_dev->mc_tomb = pmc;
spin_unlock_bh(&in_dev->mc_tomb_lock);
}
static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
{
struct ip_mc_list *pmc, *pmc_prev;
struct ip_sf_list *psf, *psf_next;
spin_lock_bh(&in_dev->mc_tomb_lock);
pmc_prev = NULL;
for (pmc = in_dev->mc_tomb; pmc; pmc = pmc->next) {
if (pmc->multiaddr == multiaddr)
break;
pmc_prev = pmc;
}
if (pmc) {
if (pmc_prev)
pmc_prev->next = pmc->next;
else
in_dev->mc_tomb = pmc->next;
}
spin_unlock_bh(&in_dev->mc_tomb_lock);
if (pmc) {
for (psf = pmc->tomb; psf; psf = psf_next) {
psf_next = psf->sf_next;
kfree(psf);
}
in_dev_put(pmc->interface);
kfree(pmc);
}
}
static void igmpv3_clear_delrec(struct in_device *in_dev)
{
struct ip_mc_list *pmc, *nextpmc;
spin_lock_bh(&in_dev->mc_tomb_lock);
pmc = in_dev->mc_tomb;
in_dev->mc_tomb = NULL;
spin_unlock_bh(&in_dev->mc_tomb_lock);
for (; pmc; pmc = nextpmc) {
nextpmc = pmc->next;
ip_mc_clear_src(pmc);
in_dev_put(pmc->interface);
kfree(pmc);
}
/* clear dead sources, too */
rcu_read_lock();
for_each_pmc_rcu(in_dev, pmc) {
struct ip_sf_list *psf, *psf_next;
spin_lock_bh(&pmc->lock);
psf = pmc->tomb;
pmc->tomb = NULL;
spin_unlock_bh(&pmc->lock);
for (; psf; psf = psf_next) {
psf_next = psf->sf_next;
kfree(psf);
}
}
rcu_read_unlock();
}
#endif
static void igmp_group_dropped(struct ip_mc_list *im)
{
struct in_device *in_dev = im->interface;
#ifdef CONFIG_IP_MULTICAST
int reporter;
#endif
if (im->loaded) {
im->loaded = 0;
ip_mc_filter_del(in_dev, im->multiaddr);
}
#ifdef CONFIG_IP_MULTICAST
if (im->multiaddr == IGMP_ALL_HOSTS)
return;
reporter = im->reporter;
igmp_stop_timer(im);
if (!in_dev->dead) {
if (IGMP_V1_SEEN(in_dev))
return;
if (IGMP_V2_SEEN(in_dev)) {
if (reporter)
igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
return;
}
/* IGMPv3 */
igmpv3_add_delrec(in_dev, im);
igmp_ifc_event(in_dev);
}
#endif
}
static void igmp_group_added(struct ip_mc_list *im)
{
struct in_device *in_dev = im->interface;
if (im->loaded == 0) {
im->loaded = 1;
ip_mc_filter_add(in_dev, im->multiaddr);
}
#ifdef CONFIG_IP_MULTICAST
if (im->multiaddr == IGMP_ALL_HOSTS)
return;
if (in_dev->dead)
return;
if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
spin_lock_bh(&im->lock);
igmp_start_timer(im, IGMP_Initial_Report_Delay);
spin_unlock_bh(&im->lock);
return;
}
/* else, v3 */
im->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
IGMP_Unsolicited_Report_Count;
igmp_ifc_event(in_dev);
#endif
}
/*
* Multicast list managers
*/
static u32 ip_mc_hash(const struct ip_mc_list *im)
{
return hash_32((__force u32)im->multiaddr, MC_HASH_SZ_LOG);
}
static void ip_mc_hash_add(struct in_device *in_dev,
struct ip_mc_list *im)
{
struct ip_mc_list __rcu **mc_hash;
u32 hash;
mc_hash = rtnl_dereference(in_dev->mc_hash);
if (mc_hash) {
hash = ip_mc_hash(im);
im->next_hash = mc_hash[hash];
rcu_assign_pointer(mc_hash[hash], im);
return;
}
/* do not use a hash table for a small number of items */
if (in_dev->mc_count < 4)
return;
mc_hash = kzalloc(sizeof(struct ip_mc_list *) << MC_HASH_SZ_LOG,
GFP_KERNEL);
if (!mc_hash)
return;
for_each_pmc_rtnl(in_dev, im) {
hash = ip_mc_hash(im);
im->next_hash = mc_hash[hash];
RCU_INIT_POINTER(mc_hash[hash], im);
}
rcu_assign_pointer(in_dev->mc_hash, mc_hash);
}
static void ip_mc_hash_remove(struct in_device *in_dev,
struct ip_mc_list *im)
{
struct ip_mc_list __rcu **mc_hash = rtnl_dereference(in_dev->mc_hash);
struct ip_mc_list *aux;
if (!mc_hash)
return;
mc_hash += ip_mc_hash(im);
while ((aux = rtnl_dereference(*mc_hash)) != im)
mc_hash = &aux->next_hash;
*mc_hash = im->next_hash;
}
/*
* A socket has joined a multicast group on device dev.
*/
void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
{
struct ip_mc_list *im;
ASSERT_RTNL();
for_each_pmc_rtnl(in_dev, im) {
if (im->multiaddr == addr) {
im->users++;
ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0);
goto out;
}
}
im = kzalloc(sizeof(*im), GFP_KERNEL);
if (!im)
goto out;
im->users = 1;
im->interface = in_dev;
in_dev_hold(in_dev);
im->multiaddr = addr;
/* initial mode is (EX, empty) */
im->sfmode = MCAST_EXCLUDE;
im->sfcount[MCAST_EXCLUDE] = 1;
atomic_set(&im->refcnt, 1);
spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im);
im->unsolicit_count = IGMP_Unsolicited_Report_Count;
#endif
im->next_rcu = in_dev->mc_list;
in_dev->mc_count++;
rcu_assign_pointer(in_dev->mc_list, im);
ip_mc_hash_add(in_dev, im);
#ifdef CONFIG_IP_MULTICAST
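	/* A rejoin cancels any pending "deleted" record for this group,
	 * so we don't emit contradictory IGMPv3 change reports.
	 */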
igmpv3_del_delrec(in_dev, im->multiaddr);
#endif
igmp_group_added(im);
if (!in_dev->dead)
ip_rt_multicast_event(in_dev);
out:
return;
}
EXPORT_SYMBOL(ip_mc_inc_group);
/*
* Resend IGMP JOIN report; used by netdev notifier.
*/
static void ip_mc_rejoin_groups(struct in_device *in_dev)
{
#ifdef CONFIG_IP_MULTICAST
struct ip_mc_list *im;
int type;
ASSERT_RTNL();
for_each_pmc_rtnl(in_dev, im) {
if (im->multiaddr == IGMP_ALL_HOSTS)
continue;
/* a failover is happening and switches
* must be notified immediately
*/
if (IGMP_V1_SEEN(in_dev))
type = IGMP_HOST_MEMBERSHIP_REPORT;
else if (IGMP_V2_SEEN(in_dev))
type = IGMPV2_HOST_MEMBERSHIP_REPORT;
else
type = IGMPV3_HOST_MEMBERSHIP_REPORT;
igmp_send_report(in_dev, im, type);
}
#endif
}
/*
* A socket has left a multicast group on device dev
*/
void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
{
struct ip_mc_list *i;
struct ip_mc_list __rcu **ip;
ASSERT_RTNL();
for (ip = &in_dev->mc_list;
(i = rtnl_dereference(*ip)) != NULL;
ip = &i->next_rcu) {
if (i->multiaddr == addr) {
if (--i->users == 0) {
ip_mc_hash_remove(in_dev, i);
*ip = i->next_rcu;
in_dev->mc_count--;
igmp_group_dropped(i);
ip_mc_clear_src(i);
if (!in_dev->dead)
ip_rt_multicast_event(in_dev);
ip_ma_put(i);
return;
}
break;
}
}
}
EXPORT_SYMBOL(ip_mc_dec_group);
/* Device changing type */
void ip_mc_unmap(struct in_device *in_dev)
{
struct ip_mc_list *pmc;
ASSERT_RTNL();
for_each_pmc_rtnl(in_dev, pmc)
igmp_group_dropped(pmc);
}
void ip_mc_remap(struct in_device *in_dev)
{
struct ip_mc_list *pmc;
ASSERT_RTNL();
for_each_pmc_rtnl(in_dev, pmc)
igmp_group_added(pmc);
}
/* Device going down */
void ip_mc_down(struct in_device *in_dev)
{
struct ip_mc_list *pmc;
ASSERT_RTNL();
for_each_pmc_rtnl(in_dev, pmc)
igmp_group_dropped(pmc);
#ifdef CONFIG_IP_MULTICAST
in_dev->mr_ifc_count = 0;
if (del_timer(&in_dev->mr_ifc_timer))
__in_dev_put(in_dev);
in_dev->mr_gq_running = 0;
if (del_timer(&in_dev->mr_gq_timer))
__in_dev_put(in_dev);
igmpv3_clear_delrec(in_dev);
#endif
ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
}
void ip_mc_init_dev(struct in_device *in_dev)
{
ASSERT_RTNL();
#ifdef CONFIG_IP_MULTICAST
setup_timer(&in_dev->mr_gq_timer, igmp_gq_timer_expire,
(unsigned long)in_dev);
setup_timer(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire,
(unsigned long)in_dev);
in_dev->mr_qrv = IGMP_Unsolicited_Report_Count;
#endif
spin_lock_init(&in_dev->mc_tomb_lock);
}
/* Device going up */
void ip_mc_up(struct in_device *in_dev)
{
struct ip_mc_list *pmc;
ASSERT_RTNL();
ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
for_each_pmc_rtnl(in_dev, pmc)
igmp_group_added(pmc);
}
/*
* Device is about to be destroyed: clean up.
*/
void ip_mc_destroy_dev(struct in_device *in_dev)
{
struct ip_mc_list *i;
ASSERT_RTNL();
/* Deactivate timers */
ip_mc_down(in_dev);
while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
in_dev->mc_list = i->next_rcu;
in_dev->mc_count--;
/* We've dropped the groups in ip_mc_down already */
ip_mc_clear_src(i);
ip_ma_put(i);
}
}
/* RTNL is locked */
static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
{
struct net_device *dev = NULL;
struct in_device *idev = NULL;
if (imr->imr_ifindex) {
idev = inetdev_by_index(net, imr->imr_ifindex);
return idev;
}
if (imr->imr_address.s_addr) {
dev = __ip_dev_find(net, imr->imr_address.s_addr, false);
if (!dev)
return NULL;
}
if (!dev) {
struct rtable *rt = ip_route_output(net,
imr->imr_multiaddr.s_addr,
0, 0, 0);
if (!IS_ERR(rt)) {
dev = rt->dst.dev;
ip_rt_put(rt);
}
}
if (dev) {
imr->imr_ifindex = dev->ifindex;
idev = __in_dev_get_rtnl(dev);
}
return idev;
}
/*
* Join a socket to a group
*/
int sysctl_igmp_max_memberships __read_mostly = IP_MAX_MEMBERSHIPS;
int sysctl_igmp_max_msf __read_mostly = IP_MAX_MSF;
static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
__be32 *psfsrc)
{
struct ip_sf_list *psf, *psf_prev;
int rv = 0;
psf_prev = NULL;
for (psf = pmc->sources; psf; psf = psf->sf_next) {
if (psf->sf_inaddr == *psfsrc)
break;
psf_prev = psf;
}
if (!psf || psf->sf_count[sfmode] == 0) {
/* source filter not found, or count wrong => bug */
return -ESRCH;
}
psf->sf_count[sfmode]--;
if (psf->sf_count[sfmode] == 0) {
ip_rt_multicast_event(pmc->interface);
}
if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
struct in_device *in_dev = pmc->interface;
#endif
/* no more filters for this source */
if (psf_prev)
psf_prev->sf_next = psf->sf_next;
else
pmc->sources = psf->sf_next;
#ifdef CONFIG_IP_MULTICAST
if (psf->sf_oldin &&
!IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
psf->sf_crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
IGMP_Unsolicited_Report_Count;
psf->sf_next = pmc->tomb;
pmc->tomb = psf;
rv = 1;
} else
#endif
kfree(psf);
}
return rv;
}
#ifndef CONFIG_IP_MULTICAST
#define igmp_ifc_event(x) do { } while (0) /* no-op when multicast reporting is compiled out */
#endif
static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
int sfcount, __be32 *psfsrc, int delta)
{
struct ip_mc_list *pmc;
int changerec = 0;
int i, err;
if (!in_dev)
return -ENODEV;
rcu_read_lock();
for_each_pmc_rcu(in_dev, pmc) {
if (*pmca == pmc->multiaddr)
break;
}
if (!pmc) {
/* MCA not found?? bug */
rcu_read_unlock();
return -ESRCH;
}
spin_lock_bh(&pmc->lock);
rcu_read_unlock();
#ifdef CONFIG_IP_MULTICAST
sf_markstate(pmc);
#endif
if (!delta) {
err = -EINVAL;
if (!pmc->sfcount[sfmode])
goto out_unlock;
pmc->sfcount[sfmode]--;
}
err = 0;
for (i = 0; i < sfcount; i++) {
int rv = ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);
changerec |= rv > 0;
if (!err && rv < 0)
err = rv;
}
if (pmc->sfmode == MCAST_EXCLUDE &&
pmc->sfcount[MCAST_EXCLUDE] == 0 &&
pmc->sfcount[MCAST_INCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
struct ip_sf_list *psf;
#endif
/* filter mode change */
pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
IGMP_Unsolicited_Report_Count;
in_dev->mr_ifc_count = pmc->crcount;
for (psf = pmc->sources; psf; psf = psf->sf_next)
psf->sf_crcount = 0;
igmp_ifc_event(pmc->interface);
} else if (sf_setstate(pmc) || changerec) {
igmp_ifc_event(pmc->interface);
#endif
}
out_unlock:
spin_unlock_bh(&pmc->lock);
return err;
}
/*
* Add multicast single-source filter to the interface list
*/
static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
__be32 *psfsrc)
{
struct ip_sf_list *psf, *psf_prev;
psf_prev = NULL;
for (psf = pmc->sources; psf; psf = psf->sf_next) {
if (psf->sf_inaddr == *psfsrc)
break;
psf_prev = psf;
}
if (!psf) {
psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
if (!psf)
return -ENOBUFS;
psf->sf_inaddr = *psfsrc;
if (psf_prev) {
psf_prev->sf_next = psf;
} else
pmc->sources = psf;
}
psf->sf_count[sfmode]++;
if (psf->sf_count[sfmode] == 1) {
ip_rt_multicast_event(pmc->interface);
}
return 0;
}
#ifdef CONFIG_IP_MULTICAST
static void sf_markstate(struct ip_mc_list *pmc)
{
struct ip_sf_list *psf;
int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
for (psf = pmc->sources; psf; psf = psf->sf_next)
if (pmc->sfcount[MCAST_EXCLUDE]) {
psf->sf_oldin = mca_xcount ==
psf->sf_count[MCAST_EXCLUDE] &&
!psf->sf_count[MCAST_INCLUDE];
} else
psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
}
static int sf_setstate(struct ip_mc_list *pmc)
{
struct ip_sf_list *psf, *dpsf;
int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
int qrv = pmc->interface->mr_qrv;
int new_in, rv;
rv = 0;
for (psf = pmc->sources; psf; psf = psf->sf_next) {
if (pmc->sfcount[MCAST_EXCLUDE]) {
new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
!psf->sf_count[MCAST_INCLUDE];
} else
new_in = psf->sf_count[MCAST_INCLUDE] != 0;
if (new_in) {
if (!psf->sf_oldin) {
struct ip_sf_list *prev = NULL;
for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next) {
if (dpsf->sf_inaddr == psf->sf_inaddr)
break;
prev = dpsf;
}
if (dpsf) {
if (prev)
prev->sf_next = dpsf->sf_next;
else
pmc->tomb = dpsf->sf_next;
kfree(dpsf);
}
psf->sf_crcount = qrv;
rv++;
}
} else if (psf->sf_oldin) {
psf->sf_crcount = 0;
/*
* add or update "delete" records if an active filter
* is now inactive
*/
for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next)
if (dpsf->sf_inaddr == psf->sf_inaddr)
break;
if (!dpsf) {
dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
if (!dpsf)
continue;
*dpsf = *psf;
/* pmc->lock held by callers */
dpsf->sf_next = pmc->tomb;
pmc->tomb = dpsf;
}
dpsf->sf_crcount = qrv;
rv++;
}
}
return rv;
}
#endif
/*
* Add multicast source filter list to the interface list
*/
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
int sfcount, __be32 *psfsrc, int delta)
{
struct ip_mc_list *pmc;
int isexclude;
int i, err;
if (!in_dev)
return -ENODEV;
rcu_read_lock();
for_each_pmc_rcu(in_dev, pmc) {
if (*pmca == pmc->multiaddr)
break;
}
if (!pmc) {
/* MCA not found?? bug */
rcu_read_unlock();
return -ESRCH;
}
spin_lock_bh(&pmc->lock);
rcu_read_unlock();
#ifdef CONFIG_IP_MULTICAST
sf_markstate(pmc);
#endif
isexclude = pmc->sfmode == MCAST_EXCLUDE;
if (!delta)
pmc->sfcount[sfmode]++;
err = 0;
for (i = 0; i < sfcount; i++) {
err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i]);
if (err)
break;
}
if (err) {
int j;
if (!delta)
pmc->sfcount[sfmode]--;
for (j = 0; j < i; j++)
(void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
#ifdef CONFIG_IP_MULTICAST
struct ip_sf_list *psf;
in_dev = pmc->interface;
#endif
/* filter mode change */
if (pmc->sfcount[MCAST_EXCLUDE])
pmc->sfmode = MCAST_EXCLUDE;
else if (pmc->sfcount[MCAST_INCLUDE])
pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
/* else no filters; keep old mode for reports */
pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
IGMP_Unsolicited_Report_Count;
in_dev->mr_ifc_count = pmc->crcount;
for (psf = pmc->sources; psf; psf = psf->sf_next)
psf->sf_crcount = 0;
igmp_ifc_event(in_dev);
} else if (sf_setstate(pmc)) {
igmp_ifc_event(in_dev);
#endif
}
spin_unlock_bh(&pmc->lock);
return err;
}
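/* Drop all source filters and return the group to its initial
 * (EXCLUDE, empty) state, matching what ip_mc_inc_group() sets up.
 */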
static void ip_mc_clear_src(struct ip_mc_list *pmc)
{
struct ip_sf_list *psf, *nextpsf;
for (psf = pmc->tomb; psf; psf = nextpsf) {
nextpsf = psf->sf_next;
kfree(psf);
}
pmc->tomb = NULL;
for (psf = pmc->sources; psf; psf = nextpsf) {
nextpsf = psf->sf_next;
kfree(psf);
}
pmc->sources = NULL;
pmc->sfmode = MCAST_EXCLUDE;
pmc->sfcount[MCAST_INCLUDE] = 0;
pmc->sfcount[MCAST_EXCLUDE] = 1;
}
/*
* Join a multicast group
*/
int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
{
int err;
__be32 addr = imr->imr_multiaddr.s_addr;
struct ip_mc_socklist *iml = NULL, *i;
struct in_device *in_dev;
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
int ifindex;
int count = 0;
if (!ipv4_is_multicast(addr))
return -EINVAL;
rtnl_lock();
in_dev = ip_mc_find_dev(net, imr);
if (!in_dev) {
iml = NULL;
err = -ENODEV;
goto done;
}
err = -EADDRINUSE;
ifindex = imr->imr_ifindex;
for_each_pmc_rtnl(inet, i) {
if (i->multi.imr_multiaddr.s_addr == addr &&
i->multi.imr_ifindex == ifindex)
goto done;
count++;
}
err = -ENOBUFS;
if (count >= sysctl_igmp_max_memberships)
goto done;
iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
if (iml == NULL)
goto done;
memcpy(&iml->multi, imr, sizeof(*imr));
iml->next_rcu = inet->mc_list;
iml->sflist = NULL;
iml->sfmode = MCAST_EXCLUDE;
rcu_assign_pointer(inet->mc_list, iml);
ip_mc_inc_group(in_dev, addr);
err = 0;
done:
rtnl_unlock();
return err;
}
EXPORT_SYMBOL(ip_mc_join_group);
static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
struct in_device *in_dev)
{
struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
int err;
if (psf == NULL) {
/* any-source empty exclude case */
return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
iml->sfmode, 0, NULL, 0);
}
err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
iml->sfmode, psf->sl_count, psf->sl_addr, 0);
RCU_INIT_POINTER(iml->sflist, NULL);
/* decrease mem now to avoid the memleak warning */
atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
kfree_rcu(psf, rcu);
return err;
}
/*
* Ask a socket to leave a group.
*/
int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
{
struct inet_sock *inet = inet_sk(sk);
struct ip_mc_socklist *iml;
struct ip_mc_socklist __rcu **imlp;
struct in_device *in_dev;
struct net *net = sock_net(sk);
__be32 group = imr->imr_multiaddr.s_addr;
u32 ifindex;
int ret = -EADDRNOTAVAIL;
rtnl_lock();
in_dev = ip_mc_find_dev(net, imr);
if (!in_dev) {
ret = -ENODEV;
goto out;
}
ifindex = imr->imr_ifindex;
for (imlp = &inet->mc_list;
(iml = rtnl_dereference(*imlp)) != NULL;
imlp = &iml->next_rcu) {
if (iml->multi.imr_multiaddr.s_addr != group)
continue;
if (ifindex) {
if (iml->multi.imr_ifindex != ifindex)
continue;
} else if (imr->imr_address.s_addr && imr->imr_address.s_addr !=
iml->multi.imr_address.s_addr)
continue;
(void) ip_mc_leave_src(sk, iml, in_dev);
*imlp = iml->next_rcu;
ip_mc_dec_group(in_dev, group);
rtnl_unlock();
/* decrease mem now to avoid the memleak warning */
atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
kfree_rcu(iml, rcu);
return 0;
}
out:
rtnl_unlock();
return ret;
}
EXPORT_SYMBOL(ip_mc_leave_group);
int ip_mc_source(int add, int omode, struct sock *sk, struct
ip_mreq_source *mreqs, int ifindex)
{
int err;
struct ip_mreqn imr;
__be32 addr = mreqs->imr_multiaddr;
struct ip_mc_socklist *pmc;
struct in_device *in_dev = NULL;
struct inet_sock *inet = inet_sk(sk);
struct ip_sf_socklist *psl;
struct net *net = sock_net(sk);
int leavegroup = 0;
int i, j, rv;
if (!ipv4_is_multicast(addr))
return -EINVAL;
rtnl_lock();
imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
imr.imr_address.s_addr = mreqs->imr_interface;
imr.imr_ifindex = ifindex;
in_dev = ip_mc_find_dev(net, &imr);
if (!in_dev) {
err = -ENODEV;
goto done;
}
err = -EADDRNOTAVAIL;
for_each_pmc_rtnl(inet, pmc) {
if ((pmc->multi.imr_multiaddr.s_addr ==
imr.imr_multiaddr.s_addr) &&
(pmc->multi.imr_ifindex == imr.imr_ifindex))
break;
}
if (!pmc) { /* must have a prior join */
err = -EINVAL;
goto done;
}
/* if a source filter was set, must be the same mode as before */
if (pmc->sflist) {
if (pmc->sfmode != omode) {
err = -EINVAL;
goto done;
}
} else if (pmc->sfmode != omode) {
/* allow mode switches for empty-set filters */
ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0);
ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0,
NULL, 0);
pmc->sfmode = omode;
}
psl = rtnl_dereference(pmc->sflist);
if (!add) {
if (!psl)
goto done; /* err = -EADDRNOTAVAIL */
rv = !0; /* any non-zero value: source not found yet */
for (i = 0; i < psl->sl_count; i++) {
rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
sizeof(__be32));
if (rv == 0)
break;
}
if (rv) /* source not found */
goto done; /* err = -EADDRNOTAVAIL */
/* special case - (INCLUDE, empty) == LEAVE_GROUP */
if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
leavegroup = 1;
goto done;
}
/* update the interface filter */
ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
&mreqs->imr_sourceaddr, 1);
for (j = i+1; j < psl->sl_count; j++)
psl->sl_addr[j-1] = psl->sl_addr[j];
psl->sl_count--;
err = 0;
goto done;
}
/* else, add a new source to the filter */
if (psl && psl->sl_count >= sysctl_igmp_max_msf) {
err = -ENOBUFS;
goto done;
}
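	/* Grow the per-socket source list in IP_SFBLOCK-entry chunks,
	 * copying any existing addresses into the new block.
	 */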
if (!psl || psl->sl_count == psl->sl_max) {
struct ip_sf_socklist *newpsl;
int count = IP_SFBLOCK;
if (psl)
count += psl->sl_max;
newpsl = sock_kmalloc(sk, IP_SFLSIZE(count), GFP_KERNEL);
if (!newpsl) {
err = -ENOBUFS;
goto done;
}
newpsl->sl_max = count;
newpsl->sl_count = count - IP_SFBLOCK;
if (psl) {
for (i = 0; i < psl->sl_count; i++)
newpsl->sl_addr[i] = psl->sl_addr[i];
/* decrease mem now to avoid the memleak warning */
atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
kfree_rcu(psl, rcu);
}
rcu_assign_pointer(pmc->sflist, newpsl);
psl = newpsl;
}
rv = 1; /* > 0 for insert logic below if sl_count is 0 */
for (i = 0; i < psl->sl_count; i++) {
rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
sizeof(__be32));
if (rv == 0)
break;
}
if (rv == 0) /* address already there is an error */
goto done;
for (j = psl->sl_count-1; j >= i; j--)
psl->sl_addr[j+1] = psl->sl_addr[j];
psl->sl_addr[i] = mreqs->imr_sourceaddr;
psl->sl_count++;
err = 0;
/* update the interface list */
ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
&mreqs->imr_sourceaddr, 1);
done:
rtnl_unlock();
if (leavegroup)
return ip_mc_leave_group(sk, &imr);
return err;
}
int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
{
int err = 0;
struct ip_mreqn imr;
__be32 addr = msf->imsf_multiaddr;
struct ip_mc_socklist *pmc;
struct in_device *in_dev;
struct inet_sock *inet = inet_sk(sk);
struct ip_sf_socklist *newpsl, *psl;
struct net *net = sock_net(sk);
int leavegroup = 0;
if (!ipv4_is_multicast(addr))
return -EINVAL;
if (msf->imsf_fmode != MCAST_INCLUDE &&
msf->imsf_fmode != MCAST_EXCLUDE)
return -EINVAL;
rtnl_lock();
imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
imr.imr_address.s_addr = msf->imsf_interface;
imr.imr_ifindex = ifindex;
in_dev = ip_mc_find_dev(net, &imr);
if (!in_dev) {
err = -ENODEV;
goto done;
}
/* special case - (INCLUDE, empty) == LEAVE_GROUP */
if (msf->imsf_fmode == MCAST_INCLUDE && msf->imsf_numsrc == 0) {
leavegroup = 1;
goto done;
}
for_each_pmc_rtnl(inet, pmc) {
if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
pmc->multi.imr_ifindex == imr.imr_ifindex)
break;
}
if (!pmc) { /* must have a prior join */
err = -EINVAL;
goto done;
}
if (msf->imsf_numsrc) {
newpsl = sock_kmalloc(sk, IP_SFLSIZE(msf->imsf_numsrc),
GFP_KERNEL);
if (!newpsl) {
err = -ENOBUFS;
goto done;
}
newpsl->sl_max = newpsl->sl_count = msf->imsf_numsrc;
memcpy(newpsl->sl_addr, msf->imsf_slist,
msf->imsf_numsrc * sizeof(msf->imsf_slist[0]));
err = ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
msf->imsf_fmode, newpsl->sl_count, newpsl->sl_addr, 0);
if (err) {
sock_kfree_s(sk, newpsl, IP_SFLSIZE(newpsl->sl_max));
goto done;
}
} else {
newpsl = NULL;
(void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
msf->imsf_fmode, 0, NULL, 0);
}
psl = rtnl_dereference(pmc->sflist);
if (psl) {
(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
psl->sl_count, psl->sl_addr, 0);
/* decrease mem now to avoid the memleak warning */
atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
kfree_rcu(psl, rcu);
} else
(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
0, NULL, 0);
rcu_assign_pointer(pmc->sflist, newpsl);
pmc->sfmode = msf->imsf_fmode;
err = 0;
done:
rtnl_unlock();
if (leavegroup)
err = ip_mc_leave_group(sk, &imr);
return err;
}
int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
struct ip_msfilter __user *optval, int __user *optlen)
{
int err, len, count, copycount;
struct ip_mreqn imr;
__be32 addr = msf->imsf_multiaddr;
struct ip_mc_socklist *pmc;
struct in_device *in_dev;
struct inet_sock *inet = inet_sk(sk);
struct ip_sf_socklist *psl;
struct net *net = sock_net(sk);
if (!ipv4_is_multicast(addr))
return -EINVAL;
rtnl_lock();
imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
imr.imr_address.s_addr = msf->imsf_interface;
imr.imr_ifindex = 0;
in_dev = ip_mc_find_dev(net, &imr);
if (!in_dev) {
err = -ENODEV;
goto done;
}
err = -EADDRNOTAVAIL;
for_each_pmc_rtnl(inet, pmc) {
if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
pmc->multi.imr_ifindex == imr.imr_ifindex)
break;
}
if (!pmc) /* must have a prior join */
goto done;
msf->imsf_fmode = pmc->sfmode;
psl = rtnl_dereference(pmc->sflist);
rtnl_unlock();
if (!psl) {
len = 0;
count = 0;
} else {
count = psl->sl_count;
}
copycount = count < msf->imsf_numsrc ? count : msf->imsf_numsrc;
len = copycount * sizeof(psl->sl_addr[0]);
msf->imsf_numsrc = count;
if (put_user(IP_MSFILTER_SIZE(copycount), optlen) ||
copy_to_user(optval, msf, IP_MSFILTER_SIZE(0))) {
return -EFAULT;
}
if (len &&
copy_to_user(&optval->imsf_slist[0], psl->sl_addr, len))
return -EFAULT;
return 0;
done:
rtnl_unlock();
return err;
}
int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
struct group_filter __user *optval, int __user *optlen)
{
int err, i, count, copycount;
struct sockaddr_in *psin;
__be32 addr;
struct ip_mc_socklist *pmc;
struct inet_sock *inet = inet_sk(sk);
struct ip_sf_socklist *psl;
psin = (struct sockaddr_in *)&gsf->gf_group;
if (psin->sin_family != AF_INET)
return -EINVAL;
addr = psin->sin_addr.s_addr;
if (!ipv4_is_multicast(addr))
return -EINVAL;
rtnl_lock();
err = -EADDRNOTAVAIL;
for_each_pmc_rtnl(inet, pmc) {
if (pmc->multi.imr_multiaddr.s_addr == addr &&
pmc->multi.imr_ifindex == gsf->gf_interface)
break;
}
if (!pmc) /* must have a prior join */
goto done;
gsf->gf_fmode = pmc->sfmode;
psl = rtnl_dereference(pmc->sflist);
rtnl_unlock();
count = psl ? psl->sl_count : 0;
copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
gsf->gf_numsrc = count;
if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
return -EFAULT;
}
for (i = 0; i < copycount; i++) {
struct sockaddr_storage ss;
psin = (struct sockaddr_in *)&ss;
memset(&ss, 0, sizeof(ss));
psin->sin_family = AF_INET;
psin->sin_addr.s_addr = psl->sl_addr[i];
if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
return -EFAULT;
}
return 0;
done:
rtnl_unlock();
return err;
}
/*
* check if a multicast source filter allows delivery for a given <src,dst,intf>
*/
int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
{
struct inet_sock *inet = inet_sk(sk);
struct ip_mc_socklist *pmc;
struct ip_sf_socklist *psl;
int i;
int ret;
ret = 1;
if (!ipv4_is_multicast(loc_addr))
goto out;
rcu_read_lock();
for_each_pmc_rcu(inet, pmc) {
if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
pmc->multi.imr_ifindex == dif)
break;
}
ret = inet->mc_all;
if (!pmc)
goto unlock;
psl = rcu_dereference(pmc->sflist);
ret = (pmc->sfmode == MCAST_EXCLUDE);
if (!psl)
goto unlock;
for (i = 0; i < psl->sl_count; i++) {
if (psl->sl_addr[i] == rmt_addr)
break;
}
ret = 0;
if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
goto unlock;
if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
goto unlock;
ret = 1;
unlock:
rcu_read_unlock();
out:
return ret;
}
/*
* A socket is closing.
*/
void ip_mc_drop_socket(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
struct ip_mc_socklist *iml;
struct net *net = sock_net(sk);
if (inet->mc_list == NULL)
return;
rtnl_lock();
while ((iml = rtnl_dereference(inet->mc_list)) != NULL) {
struct in_device *in_dev;
inet->mc_list = iml->next_rcu;
in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
(void) ip_mc_leave_src(sk, iml, in_dev);
if (in_dev != NULL)
ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
/* decrease mem now to avoid the memleak warning */
atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
kfree_rcu(iml, rcu);
}
rtnl_unlock();
}
/* called with rcu_read_lock() */
int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 proto)
{
struct ip_mc_list *im;
struct ip_mc_list __rcu **mc_hash;
struct ip_sf_list *psf;
int rv = 0;
mc_hash = rcu_dereference(in_dev->mc_hash);
if (mc_hash) {
u32 hash = hash_32((__force u32)mc_addr, MC_HASH_SZ_LOG);
for (im = rcu_dereference(mc_hash[hash]);
im != NULL;
im = rcu_dereference(im->next_hash)) {
if (im->multiaddr == mc_addr)
break;
}
} else {
for_each_pmc_rcu(in_dev, im) {
if (im->multiaddr == mc_addr)
break;
}
}
if (im && proto == IPPROTO_IGMP) {
rv = 1;
} else if (im) {
if (src_addr) {
for (psf = im->sources; psf; psf = psf->sf_next) {
if (psf->sf_inaddr == src_addr)
break;
}
if (psf)
rv = psf->sf_count[MCAST_INCLUDE] ||
psf->sf_count[MCAST_EXCLUDE] !=
im->sfcount[MCAST_EXCLUDE];
else
rv = im->sfcount[MCAST_EXCLUDE] != 0;
} else
rv = 1; /* unspecified source; tentatively allow */
}
return rv;
}
#if defined(CONFIG_PROC_FS)
struct igmp_mc_iter_state {
struct seq_net_private p;
struct net_device *dev;
struct in_device *in_dev;
};
#define igmp_mc_seq_private(seq) ((struct igmp_mc_iter_state *)(seq)->private)
static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
{
struct net *net = seq_file_net(seq);
struct ip_mc_list *im = NULL;
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
state->in_dev = NULL;
for_each_netdev_rcu(net, state->dev) {
struct in_device *in_dev;
in_dev = __in_dev_get_rcu(state->dev);
if (!in_dev)
continue;
im = rcu_dereference(in_dev->mc_list);
if (im) {
state->in_dev = in_dev;
break;
}
}
return im;
}
static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
{
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
im = rcu_dereference(im->next_rcu);
while (!im) {
state->dev = next_net_device_rcu(state->dev);
if (!state->dev) {
state->in_dev = NULL;
break;
}
state->in_dev = __in_dev_get_rcu(state->dev);
if (!state->in_dev)
continue;
im = rcu_dereference(state->in_dev->mc_list);
}
return im;
}
static struct ip_mc_list *igmp_mc_get_idx(struct seq_file *seq, loff_t pos)
{
struct ip_mc_list *im = igmp_mc_get_first(seq);
if (im)
while (pos && (im = igmp_mc_get_next(seq, im)) != NULL)
--pos;
return pos ? NULL : im;
}
static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(rcu)
{
rcu_read_lock();
return *pos ? igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *igmp_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ip_mc_list *im;
if (v == SEQ_START_TOKEN)
im = igmp_mc_get_first(seq);
else
im = igmp_mc_get_next(seq, v);
++*pos;
return im;
}
static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
__releases(rcu)
{
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
state->in_dev = NULL;
state->dev = NULL;
rcu_read_unlock();
}
static int igmp_mc_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n");
else {
struct ip_mc_list *im = (struct ip_mc_list *)v;
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
char *querier;
long delta;
#ifdef CONFIG_IP_MULTICAST
querier = IGMP_V1_SEEN(state->in_dev) ? "V1" :
IGMP_V2_SEEN(state->in_dev) ? "V2" :
"V3";
#else
querier = "NONE";
#endif
if (rcu_dereference(state->in_dev->mc_list) == im) {
seq_printf(seq, "%d\t%-10s: %5d %7s\n",
state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
}
delta = im->timer.expires - jiffies;
seq_printf(seq,
"\t\t\t\t%08X %5d %d:%08lX\t\t%d\n",
im->multiaddr, im->users,
im->tm_running,
im->tm_running ? jiffies_delta_to_clock_t(delta) : 0,
im->reporter);
}
return 0;
}
static const struct seq_operations igmp_mc_seq_ops = {
.start = igmp_mc_seq_start,
.next = igmp_mc_seq_next,
.stop = igmp_mc_seq_stop,
.show = igmp_mc_seq_show,
};
static int igmp_mc_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &igmp_mc_seq_ops,
sizeof(struct igmp_mc_iter_state));
}
static const struct file_operations igmp_mc_seq_fops = {
.owner = THIS_MODULE,
.open = igmp_mc_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
struct igmp_mcf_iter_state {
struct seq_net_private p;
struct net_device *dev;
struct in_device *idev;
struct ip_mc_list *im;
};
#define igmp_mcf_seq_private(seq) ((struct igmp_mcf_iter_state *)(seq)->private)
static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
{
struct net *net = seq_file_net(seq);
struct ip_sf_list *psf = NULL;
struct ip_mc_list *im = NULL;
struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
state->idev = NULL;
state->im = NULL;
for_each_netdev_rcu(net, state->dev) {
struct in_device *idev;
idev = __in_dev_get_rcu(state->dev);
if (unlikely(idev == NULL))
continue;
im = rcu_dereference(idev->mc_list);
if (likely(im != NULL)) {
spin_lock_bh(&im->lock);
psf = im->sources;
if (likely(psf != NULL)) {
state->im = im;
state->idev = idev;
break;
}
spin_unlock_bh(&im->lock);
}
}
return psf;
}
static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_list *psf)
{
struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
psf = psf->sf_next;
while (!psf) {
spin_unlock_bh(&state->im->lock);
state->im = state->im->next;
while (!state->im) {
state->dev = next_net_device_rcu(state->dev);
if (!state->dev) {
state->idev = NULL;
goto out;
}
state->idev = __in_dev_get_rcu(state->dev);
if (!state->idev)
continue;
state->im = rcu_dereference(state->idev->mc_list);
}
if (!state->im)
break;
spin_lock_bh(&state->im->lock);
psf = state->im->sources;
}
out:
return psf;
}
static struct ip_sf_list *igmp_mcf_get_idx(struct seq_file *seq, loff_t pos)
{
struct ip_sf_list *psf = igmp_mcf_get_first(seq);
if (psf)
while (pos && (psf = igmp_mcf_get_next(seq, psf)) != NULL)
--pos;
return pos ? NULL : psf;
}
static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(rcu)
{
rcu_read_lock();
return *pos ? igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *igmp_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ip_sf_list *psf;
if (v == SEQ_START_TOKEN)
psf = igmp_mcf_get_first(seq);
else
psf = igmp_mcf_get_next(seq, v);
++*pos;
return psf;
}
static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
__releases(rcu)
{
struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
if (likely(state->im != NULL)) {
spin_unlock_bh(&state->im->lock);
state->im = NULL;
}
state->idev = NULL;
state->dev = NULL;
rcu_read_unlock();
}
static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
{
struct ip_sf_list *psf = (struct ip_sf_list *)v;
struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
if (v == SEQ_START_TOKEN) {
seq_printf(seq,
"%3s %6s "
"%10s %10s %6s %6s\n", "Idx",
"Device", "MCA",
"SRC", "INC", "EXC");
} else {
seq_printf(seq,
"%3d %6.6s 0x%08x "
"0x%08x %6lu %6lu\n",
state->dev->ifindex, state->dev->name,
ntohl(state->im->multiaddr),
ntohl(psf->sf_inaddr),
psf->sf_count[MCAST_INCLUDE],
psf->sf_count[MCAST_EXCLUDE]);
}
return 0;
}
static const struct seq_operations igmp_mcf_seq_ops = {
.start = igmp_mcf_seq_start,
.next = igmp_mcf_seq_next,
.stop = igmp_mcf_seq_stop,
.show = igmp_mcf_seq_show,
};
static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &igmp_mcf_seq_ops,
sizeof(struct igmp_mcf_iter_state));
}
static const struct file_operations igmp_mcf_seq_fops = {
.owner = THIS_MODULE,
.open = igmp_mcf_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static int __net_init igmp_net_init(struct net *net)
{
struct proc_dir_entry *pde;
pde = proc_create("igmp", S_IRUGO, net->proc_net, &igmp_mc_seq_fops);
if (!pde)
goto out_igmp;
pde = proc_create("mcfilter", S_IRUGO, net->proc_net,
&igmp_mcf_seq_fops);
if (!pde)
goto out_mcfilter;
return 0;
out_mcfilter:
remove_proc_entry("igmp", net->proc_net);
out_igmp:
return -ENOMEM;
}
static void __net_exit igmp_net_exit(struct net *net)
{
remove_proc_entry("mcfilter", net->proc_net);
remove_proc_entry("igmp", net->proc_net);
}
static struct pernet_operations igmp_net_ops = {
.init = igmp_net_init,
.exit = igmp_net_exit,
};
#endif
static int igmp_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct in_device *in_dev;
switch (event) {
case NETDEV_RESEND_IGMP:
in_dev = __in_dev_get_rtnl(dev);
if (in_dev)
ip_mc_rejoin_groups(in_dev);
break;
default:
break;
}
return NOTIFY_DONE;
}
static struct notifier_block igmp_notifier = {
.notifier_call = igmp_netdev_event,
};
int __init igmp_mc_init(void)
{
#if defined(CONFIG_PROC_FS)
int err;
err = register_pernet_subsys(&igmp_net_ops);
if (err)
return err;
err = register_netdevice_notifier(&igmp_notifier);
if (err)
goto reg_notif_fail;
return 0;
reg_notif_fail:
unregister_pernet_subsys(&igmp_net_ops);
return err;
#else
return register_netdevice_notifier(&igmp_notifier);
#endif
}
| gpl-2.0 |
kuba160/tf300t-kernel | drivers/net/acenic.c | 386 | 87478 | /*
* acenic.c: Linux driver for the Alteon AceNIC Gigabit Ethernet card
* and other Tigon based cards.
*
* Copyright 1998-2002 by Jes Sorensen, <jes@trained-monkey.org>.
*
* Thanks to Alteon and 3Com for providing hardware and documentation
* enabling me to write this driver.
*
* A mailing list for discussing the use of this driver has been
* setup, please subscribe to the lists if you have any questions
* about the driver. Send mail to linux-acenic-help@sunsite.auc.dk to
* see how to subscribe.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Additional credits:
* Pete Wyckoff <wyckoff@ca.sandia.gov>: Initial Linux/Alpha and trace
* dump support. The trace dump support has not been
* integrated yet however.
* Troy Benjegerdes: Big Endian (PPC) patches.
* Nate Stahl: Better out of memory handling and stats support.
* Aman Singla: Nasty race between interrupt handler and tx code dealing
* with 'testing the tx_ret_csm and setting tx_full'
* David S. Miller <davem@redhat.com>: conversion to new PCI dma mapping
* infrastructure and Sparc support
* Pierrick Pinasseau (CERN): For lending me an Ultra 5 to test the
* driver under Linux/Sparc64
* Matt Domsch <Matt_Domsch@dell.com>: Detect Alteon 1000baseT cards
* ETHTOOL_GDRVINFO support
* Chip Salzenberg <chip@valinux.com>: Fix race condition between tx
* handler and close() cleanup.
* Ken Aaker <kdaaker@rchland.vnet.ibm.com>: Correct check for whether
* memory mapped IO is enabled to
* make the driver work on RS/6000.
* Takayoshi Kouchi <kouchi@hpc.bs1.fc.nec.co.jp>: Identifying problem
* where the driver would disable
* bus master mode if it had to disable
* write and invalidate.
* Stephen Hack <stephen_hack@hp.com>: Fixed ace_set_mac_addr for little
* endian systems.
* Val Henson <vhenson@esscom.com>: Reset Jumbo skb producer and
* rx producer index when
* flushing the Jumbo ring.
* Hans Grobler <grobh@sun.ac.za>: Memory leak fixes in the
* driver init path.
* Grant Grundler <grundler@cup.hp.com>: PCI write posting fixes.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sockios.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#ifdef SIOCETHTOOL
#include <linux/ethtool.h>
#endif
#include <net/sock.h>
#include <net/ip.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#define DRV_NAME "acenic"
#undef INDEX_DEBUG
#ifdef CONFIG_ACENIC_OMIT_TIGON_I
#define ACE_IS_TIGON_I(ap) 0
#define ACE_TX_RING_ENTRIES(ap) MAX_TX_RING_ENTRIES
#else
#define ACE_IS_TIGON_I(ap) (ap->version == 1)
#define ACE_TX_RING_ENTRIES(ap) ap->tx_ring_entries
#endif
#ifndef PCI_VENDOR_ID_ALTEON
#define PCI_VENDOR_ID_ALTEON 0x12ae
#endif
#ifndef PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE
#define PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE 0x0001
#define PCI_DEVICE_ID_ALTEON_ACENIC_COPPER 0x0002
#endif
#ifndef PCI_DEVICE_ID_3COM_3C985
#define PCI_DEVICE_ID_3COM_3C985 0x0001
#endif
#ifndef PCI_VENDOR_ID_NETGEAR
#define PCI_VENDOR_ID_NETGEAR 0x1385
#define PCI_DEVICE_ID_NETGEAR_GA620 0x620a
#endif
#ifndef PCI_DEVICE_ID_NETGEAR_GA620T
#define PCI_DEVICE_ID_NETGEAR_GA620T 0x630a
#endif
/*
* Farallon used the DEC vendor ID by mistake and they seem not
* to care - stinky!
*/
#ifndef PCI_DEVICE_ID_FARALLON_PN9000SX
#define PCI_DEVICE_ID_FARALLON_PN9000SX 0x1a
#endif
#ifndef PCI_DEVICE_ID_FARALLON_PN9100T
#define PCI_DEVICE_ID_FARALLON_PN9100T 0xfa
#endif
#ifndef PCI_VENDOR_ID_SGI
#define PCI_VENDOR_ID_SGI 0x10a9
#endif
#ifndef PCI_DEVICE_ID_SGI_ACENIC
#define PCI_DEVICE_ID_SGI_ACENIC 0x0009
#endif
static DEFINE_PCI_DEVICE_TABLE(acenic_pci_tbl) = {
{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C985,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620T,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
/*
* Farallon used the DEC vendor ID on their cards incorrectly,
* then later Alteon's ID.
*/
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_FARALLON_PN9000SX,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_FARALLON_PN9100T,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_ACENIC,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
{ }
};
MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
#define ace_sync_irq(irq) synchronize_irq(irq)
#ifndef offset_in_page
#define offset_in_page(ptr) ((unsigned long)(ptr) & ~PAGE_MASK)
#endif
#define ACE_MAX_MOD_PARMS 8
#define BOARD_IDX_STATIC 0
#define BOARD_IDX_OVERFLOW -1
#include "acenic.h"
/*
* These must be defined before the firmware is included.
*/
#define MAX_TEXT_LEN 96*1024
#define MAX_RODATA_LEN 8*1024
#define MAX_DATA_LEN 2*1024
#ifndef tigon2FwReleaseLocal
#define tigon2FwReleaseLocal 0
#endif
/*
* This driver currently supports Tigon I and Tigon II based cards
* including the Alteon AceNIC, the 3Com 3C985[B] and NetGear
* GA620. The driver should also work on the SGI, DEC and Farallon
* versions of the card, however I have not been able to test that
* myself.
*
* This card is really neat, it supports receive hardware checksumming
* and jumbo frames (up to 9000 bytes) and does a lot of work in the
* firmware. Also the programming interface is quite neat, except for
* the parts dealing with the i2c eeprom on the card ;-)
*
* Using jumbo frames:
*
* To enable jumbo frames, simply specify an mtu between 1500 and 9000
* bytes to ifconfig. Jumbo frames can be enabled or disabled at any time
* by running `ifconfig eth<X> mtu <MTU>' with <X> being the Ethernet
* interface number and <MTU> being the MTU value.
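*
* For example (eth0 being a hypothetical interface name),
* `ifconfig eth0 mtu 9000' enables jumbo frames and
* `ifconfig eth0 mtu 1500' switches back to standard frames.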
*
* Module parameters:
*
* When compiled as a loadable module, the driver allows for a number
* of module parameters to be specified. The driver supports the
* following module parameters:
*
* trace=<val> - Firmware trace level. This requires special traced
* firmware to replace the firmware supplied with
* the driver - for debugging purposes only.
*
* link=<val> - Link state. Normally you want to use the default link
* parameters set by the driver. This can be used to
* override these in case your switch doesn't negotiate
* the link properly. Valid values are:
* 0x0001 - Force half duplex link.
* 0x0002 - Do not negotiate line speed with the other end.
* 0x0010 - 10Mbit/sec link.
* 0x0020 - 100Mbit/sec link.
* 0x0040 - 1000Mbit/sec link.
* 0x0100 - Do not negotiate flow control.
* 0x0200 - Enable RX flow control Y
* 0x0400 - Enable TX flow control Y (Tigon II NICs only).
* Default value is 0x0270, ie. enable link and flow
* control negotiation, negotiating the highest
* possible link speed with RX flow control enabled.
*
* When disabling link speed negotiation, only one link
* speed is allowed to be specified!
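*
* For example (a value derived from the flags above): to force
* a 1000Mbit/sec link without speed negotiation, combine
* 0x0002 (no speed negotiation) with 0x0040 (1000Mbit/sec),
* ie. link=0x0042.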
*
* tx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
* to wait for more packets to arrive before
* interrupting the host, from the time the first
* packet arrives.
*
* rx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
* to wait for more packets to arrive in the receive ring,
* before interrupting the host, after receiving the
* first packet in the ring.
*
* max_tx_desc=<val> - maximum number of transmit descriptors
* (packets) transmitted before interrupting the host.
*
* max_rx_desc=<val> - maximum number of receive descriptors
* (packets) received before interrupting the host.
*
* tx_ratio=<val> - value in the range 0 - 63 specifying the split, in
* 64ths, of the NIC's on board memory to be used for
* transmit and receive buffers. For the 1MB NIC approx. 800KB
* is available, on the 1/2MB NIC approx. 300KB is available.
* 68KB will always be available as a minimum for both
* directions. The default value is a 50/50 split.
*
* dis_pci_mem_inval=<val> - disable PCI memory write and invalidate
* operations, default (1) is to always disable this as
* that is what Alteon does on NT. I have not been able
* to measure any real performance differences with
* this on my systems. Set <val>=0 if you want to
* enable these operations.
*
* If you use more than one NIC, separate the parameters for the
* individual NICs with commas, ie. trace=0,0x00001fff,0 if you want to
* run tracing on NIC #2 but not on NIC #1 and #3.
*
* TODO:
*
* - Proper multicast support.
* - NIC dump support.
* - More tuning parameters.
*
* The mini ring is not used under Linux and I am not sure it makes sense
* to actually use it.
*
* New interrupt handler strategy:
*
* The old interrupt handler worked using the traditional method of
* replacing an skbuff with a new one when a packet arrives. However
* the rx rings do not need to contain a static number of buffer
* descriptors, thus it makes sense to move the memory allocation out
* of the main interrupt handler and do it in a bottom half handler
* and only allocate new buffers when the number of buffers in the
* ring is below a certain threshold. In order to avoid starving the
* NIC under heavy load it is however necessary to force allocation
* when hitting a minimum threshold. The strategy for allocation is as
* follows:
*
* RX_LOW_BUF_THRES - allocate buffers in the bottom half
* RX_PANIC_LOW_THRES - we are very low on buffers, allocate
* the buffers in the interrupt handler
* RX_RING_THRES - maximum number of buffers in the rx ring
* RX_MINI_THRES - maximum number of buffers in the mini ring
* RX_JUMBO_THRES - maximum number of buffers in the jumbo ring
*
* One advantageous side effect of this allocation approach is that the
* entire rx processing can be done without holding any spin lock
* since the rx rings and registers are totally independent of the tx
* ring and its registers. This of course includes the kmalloc's of
* new skb's. Thus start_xmit can run in parallel with rx processing
* and the memory allocation on SMP systems.
*
* Note that running the skb reallocation in a bottom half opens up
* another can of races which needs to be handled properly. In
* particular it can happen that the interrupt handler tries to run
* the reallocation while the bottom half is either running on another
* CPU or was interrupted on the same CPU. To get around this the
* driver uses bitops to prevent the reallocation routines from being
* reentered.
*
* TX handling can also be done without holding any spin lock, wheee
* this is fun! since tx_ret_csm is only written to by the interrupt
* handler. The case to be aware of is when shutting down the device
* and cleaning up where it is necessary to make sure that
* start_xmit() is not running while this is happening. Well DaveM
* informs me that this case is already protected against ... bye bye
* Mr. Spin Lock, it was nice to know you.
*
* TX interrupts are now partly disabled so the NIC will only generate
* TX interrupts for the number of coal ticks, not for the number of
* TX packets in the queue. This should reduce the number of TX only,
* ie. when no RX processing is done, interrupts seen.
*/
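/*
* A minimal sketch of the resulting refill guard, as implemented by
* ace_tasklet() further down; the bitop keeps the refill routine
* from being reentered while a refill is already in progress:
*
* cur_size = atomic_read(&ap->cur_rx_bufs);
* if (cur_size < RX_LOW_STD_THRES &&
* !test_and_set_bit(0, &ap->std_refill_busy))
* ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size);
*/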
/*
* Threshold values for RX buffer allocation - the low water marks for
* when to start refilling the rings are set to 75% of the ring
* sizes. It seems to make sense to refill the rings entirely from the
* interrupt handler once it gets below the panic threshold, that way
* we don't risk that the refilling is moved to another CPU when the
* one running the interrupt handler just got the slab code hot in its
* cache.
*/
#define RX_RING_SIZE 72
#define RX_MINI_SIZE 64
#define RX_JUMBO_SIZE 48
#define RX_PANIC_STD_THRES 16
#define RX_PANIC_STD_REFILL (3*RX_PANIC_STD_THRES)/2
#define RX_LOW_STD_THRES (3*RX_RING_SIZE)/4
#define RX_PANIC_MINI_THRES 12
#define RX_PANIC_MINI_REFILL (3*RX_PANIC_MINI_THRES)/2
#define RX_LOW_MINI_THRES (3*RX_MINI_SIZE)/4
#define RX_PANIC_JUMBO_THRES 6
#define RX_PANIC_JUMBO_REFILL (3*RX_PANIC_JUMBO_THRES)/2
#define RX_LOW_JUMBO_THRES (3*RX_JUMBO_SIZE)/4
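/*
* Worked out with the sizes above, for the standard ring: the tasklet
* starts refilling once fewer than RX_LOW_STD_THRES = (3*72)/4 = 54
* buffers remain, and the interrupt handler takes over below
* RX_PANIC_STD_THRES = 16, with RX_PANIC_STD_REFILL = (3*16)/2 = 24
* as the corresponding refill count.
*/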
/*
* Size of the mini ring entries, basically these just should be big
* enough to take TCP ACKs
*/
#define ACE_MINI_SIZE 100
#define ACE_MINI_BUFSIZE ACE_MINI_SIZE
#define ACE_STD_BUFSIZE (ACE_STD_MTU + ETH_HLEN + 4)
#define ACE_JUMBO_BUFSIZE (ACE_JUMBO_MTU + ETH_HLEN + 4)
/*
* There seems to be a magic difference in the effect between 995 and 996
* but little difference between 900 and 995 ... no idea why.
*
* There is now a default set of tuning parameters which is set, depending
* on whether or not the user enables Jumbo frames. It's assumed that if
* Jumbo frames are enabled, the user wants optimal tuning for that case.
*/
#define DEF_TX_COAL 400 /* 996 */
#define DEF_TX_MAX_DESC 60 /* was 40 */
#define DEF_RX_COAL 120 /* 1000 */
#define DEF_RX_MAX_DESC 25
#define DEF_TX_RATIO 21 /* 24 */
#define DEF_JUMBO_TX_COAL 20
#define DEF_JUMBO_TX_MAX_DESC 60
#define DEF_JUMBO_RX_COAL 30
#define DEF_JUMBO_RX_MAX_DESC 6
#define DEF_JUMBO_TX_RATIO 21
#if tigon2FwReleaseLocal < 20001118
/*
* Standard firmware and early modifications duplicate
* IRQ load without this flag (coal timer is never reset).
* Note that with this flag tx_coal should be less than
* time to xmit full tx ring.
* 400usec is not so bad for tx ring size of 128.
*/
#define TX_COAL_INTS_ONLY 1 /* worth it */
#else
/*
* With modified firmware, this is not necessary, but still useful.
*/
#define TX_COAL_INTS_ONLY 1
#endif
#define DEF_TRACE 0
#define DEF_STAT (2 * TICKS_PER_SEC)
static int link_state[ACE_MAX_MOD_PARMS];
static int trace[ACE_MAX_MOD_PARMS];
static int tx_coal_tick[ACE_MAX_MOD_PARMS];
static int rx_coal_tick[ACE_MAX_MOD_PARMS];
static int max_tx_desc[ACE_MAX_MOD_PARMS];
static int max_rx_desc[ACE_MAX_MOD_PARMS];
static int tx_ratio[ACE_MAX_MOD_PARMS];
static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};
MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
#ifndef CONFIG_ACENIC_OMIT_TIGON_I
MODULE_FIRMWARE("acenic/tg1.bin");
#endif
MODULE_FIRMWARE("acenic/tg2.bin");
module_param_array_named(link, link_state, int, NULL, 0);
module_param_array(trace, int, NULL, 0);
module_param_array(tx_coal_tick, int, NULL, 0);
module_param_array(max_tx_desc, int, NULL, 0);
module_param_array(rx_coal_tick, int, NULL, 0);
module_param_array(max_rx_desc, int, NULL, 0);
module_param_array(tx_ratio, int, NULL, 0);
MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first tx descriptor arrives");
MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait");
MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first rx descriptor arrives");
MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait");
MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");
static const char version[] __devinitconst =
"acenic.c: v0.92 08/05/2002 Jes Sorensen, linux-acenic@SunSITE.dk\n"
" http://home.cern.ch/~jes/gige/acenic.html\n";
static int ace_get_settings(struct net_device *, struct ethtool_cmd *);
static int ace_set_settings(struct net_device *, struct ethtool_cmd *);
static void ace_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
static const struct ethtool_ops ace_ethtool_ops = {
.get_settings = ace_get_settings,
.set_settings = ace_set_settings,
.get_drvinfo = ace_get_drvinfo,
};
static void ace_watchdog(struct net_device *dev);
static const struct net_device_ops ace_netdev_ops = {
.ndo_open = ace_open,
.ndo_stop = ace_close,
.ndo_tx_timeout = ace_watchdog,
.ndo_get_stats = ace_get_stats,
.ndo_start_xmit = ace_start_xmit,
.ndo_set_multicast_list = ace_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ace_set_mac_addr,
.ndo_change_mtu = ace_change_mtu,
};
static int __devinit acenic_probe_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct net_device *dev;
struct ace_private *ap;
static int boards_found;
dev = alloc_etherdev(sizeof(struct ace_private));
if (dev == NULL) {
printk(KERN_ERR "acenic: Unable to allocate "
"net_device structure!\n");
return -ENOMEM;
}
SET_NETDEV_DEV(dev, &pdev->dev);
ap = netdev_priv(dev);
ap->pdev = pdev;
ap->name = pci_name(pdev);
dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->watchdog_timeo = 5*HZ;
dev->netdev_ops = &ace_netdev_ops;
SET_ETHTOOL_OPS(dev, &ace_ethtool_ops);
/* we only display this string ONCE */
if (!boards_found)
printk(version);
if (pci_enable_device(pdev))
goto fail_free_netdev;
/*
* Enable master mode before we start playing with the
* pci_command word since pci_set_master() will modify
* it.
*/
pci_set_master(pdev);
pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command);
/* OpenFirmware on Mac's does not set this - DOH.. */
if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
printk(KERN_INFO "%s: Enabling PCI Memory Mapped "
"access - was not enabled by BIOS/Firmware\n",
ap->name);
ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY;
pci_write_config_word(ap->pdev, PCI_COMMAND,
ap->pci_command);
wmb();
}
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency);
if (ap->pci_latency <= 0x40) {
ap->pci_latency = 0x40;
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency);
}
/*
* Remap the regs into kernel space - this is abuse of
* dev->base_addr since it was meant for I/O port
* addresses but who gives a damn.
*/
dev->base_addr = pci_resource_start(pdev, 0);
ap->regs = ioremap(dev->base_addr, 0x4000);
if (!ap->regs) {
printk(KERN_ERR "%s: Unable to map I/O register, "
"AceNIC %i will be disabled.\n",
ap->name, boards_found);
goto fail_free_netdev;
}
switch(pdev->vendor) {
case PCI_VENDOR_ID_ALTEON:
if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) {
printk(KERN_INFO "%s: Farallon PN9100-T ",
ap->name);
} else {
printk(KERN_INFO "%s: Alteon AceNIC ",
ap->name);
}
break;
case PCI_VENDOR_ID_3COM:
printk(KERN_INFO "%s: 3Com 3C985 ", ap->name);
break;
case PCI_VENDOR_ID_NETGEAR:
printk(KERN_INFO "%s: NetGear GA620 ", ap->name);
break;
case PCI_VENDOR_ID_DEC:
if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) {
printk(KERN_INFO "%s: Farallon PN9000-SX ",
ap->name);
break;
}
case PCI_VENDOR_ID_SGI:
printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
break;
default:
printk(KERN_INFO "%s: Unknown AceNIC ", ap->name);
break;
}
printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
printk("irq %d\n", pdev->irq);
#ifdef CONFIG_ACENIC_OMIT_TIGON_I
if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
printk(KERN_ERR "%s: Driver compiled without Tigon I"
" support - NIC disabled\n", dev->name);
goto fail_uninit;
}
#endif
if (ace_allocate_descriptors(dev))
goto fail_free_netdev;
#ifdef MODULE
if (boards_found >= ACE_MAX_MOD_PARMS)
ap->board_idx = BOARD_IDX_OVERFLOW;
else
ap->board_idx = boards_found;
#else
ap->board_idx = BOARD_IDX_STATIC;
#endif
if (ace_init(dev))
goto fail_free_netdev;
if (register_netdev(dev)) {
printk(KERN_ERR "acenic: device registration failed\n");
goto fail_uninit;
}
ap->name = dev->name;
if (ap->pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
pci_set_drvdata(pdev, dev);
boards_found++;
return 0;
fail_uninit:
ace_init_cleanup(dev);
fail_free_netdev:
free_netdev(dev);
return -ENODEV;
}
static void __devexit acenic_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
short i;
unregister_netdev(dev);
writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
if (ap->version >= 2)
writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
/*
* This clears any pending interrupts
*/
writel(1, &regs->Mb0Lo);
readl(&regs->CpuCtrl); /* flush */
/*
* Make sure no other CPUs are processing interrupts
* on the card before the buffers are being released.
* Otherwise one might experience some `interesting'
* effects.
*
* Then release the RX buffers - jumbo buffers were
* already released in ace_close().
*/
ace_sync_irq(dev->irq);
for (i = 0; i < RX_STD_RING_ENTRIES; i++) {
struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;
if (skb) {
struct ring_info *ringp;
dma_addr_t mapping;
ringp = &ap->skb->rx_std_skbuff[i];
mapping = dma_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_STD_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->rx_std_ring[i].size = 0;
ap->skb->rx_std_skbuff[i].skb = NULL;
dev_kfree_skb(skb);
}
}
if (ap->version >= 2) {
for (i = 0; i < RX_MINI_RING_ENTRIES; i++) {
struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;
if (skb) {
struct ring_info *ringp;
dma_addr_t mapping;
ringp = &ap->skb->rx_mini_skbuff[i];
mapping = dma_unmap_addr(ringp,mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_MINI_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->rx_mini_ring[i].size = 0;
ap->skb->rx_mini_skbuff[i].skb = NULL;
dev_kfree_skb(skb);
}
}
}
for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
if (skb) {
struct ring_info *ringp;
dma_addr_t mapping;
ringp = &ap->skb->rx_jumbo_skbuff[i];
mapping = dma_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_JUMBO_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->rx_jumbo_ring[i].size = 0;
ap->skb->rx_jumbo_skbuff[i].skb = NULL;
dev_kfree_skb(skb);
}
}
ace_init_cleanup(dev);
free_netdev(dev);
}
static struct pci_driver acenic_pci_driver = {
.name = "acenic",
.id_table = acenic_pci_tbl,
.probe = acenic_probe_one,
.remove = __devexit_p(acenic_remove_one),
};
static int __init acenic_init(void)
{
return pci_register_driver(&acenic_pci_driver);
}
static void __exit acenic_exit(void)
{
pci_unregister_driver(&acenic_pci_driver);
}
module_init(acenic_init);
module_exit(acenic_exit);
static void ace_free_descriptors(struct net_device *dev)
{
struct ace_private *ap = netdev_priv(dev);
int size;
if (ap->rx_std_ring != NULL) {
size = (sizeof(struct rx_desc) *
(RX_STD_RING_ENTRIES +
RX_JUMBO_RING_ENTRIES +
RX_MINI_RING_ENTRIES +
RX_RETURN_RING_ENTRIES));
pci_free_consistent(ap->pdev, size, ap->rx_std_ring,
ap->rx_ring_base_dma);
ap->rx_std_ring = NULL;
ap->rx_jumbo_ring = NULL;
ap->rx_mini_ring = NULL;
ap->rx_return_ring = NULL;
}
if (ap->evt_ring != NULL) {
size = (sizeof(struct event) * EVT_RING_ENTRIES);
pci_free_consistent(ap->pdev, size, ap->evt_ring,
ap->evt_ring_dma);
ap->evt_ring = NULL;
}
if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) {
size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
pci_free_consistent(ap->pdev, size, ap->tx_ring,
ap->tx_ring_dma);
}
ap->tx_ring = NULL;
if (ap->evt_prd != NULL) {
pci_free_consistent(ap->pdev, sizeof(u32),
(void *)ap->evt_prd, ap->evt_prd_dma);
ap->evt_prd = NULL;
}
if (ap->rx_ret_prd != NULL) {
pci_free_consistent(ap->pdev, sizeof(u32),
(void *)ap->rx_ret_prd,
ap->rx_ret_prd_dma);
ap->rx_ret_prd = NULL;
}
if (ap->tx_csm != NULL) {
pci_free_consistent(ap->pdev, sizeof(u32),
(void *)ap->tx_csm, ap->tx_csm_dma);
ap->tx_csm = NULL;
}
}
static int ace_allocate_descriptors(struct net_device *dev)
{
struct ace_private *ap = netdev_priv(dev);
int size;
size = (sizeof(struct rx_desc) *
(RX_STD_RING_ENTRIES +
RX_JUMBO_RING_ENTRIES +
RX_MINI_RING_ENTRIES +
RX_RETURN_RING_ENTRIES));
ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size,
&ap->rx_ring_base_dma);
if (ap->rx_std_ring == NULL)
goto fail;
ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES;
ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES;
ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES;
size = (sizeof(struct event) * EVT_RING_ENTRIES);
ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma);
if (ap->evt_ring == NULL)
goto fail;
/*
* Only allocate a host TX ring for the Tigon II, the Tigon I
* has to use PCI registers for this ;-(
*/
if (!ACE_IS_TIGON_I(ap)) {
size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
ap->tx_ring = pci_alloc_consistent(ap->pdev, size,
&ap->tx_ring_dma);
if (ap->tx_ring == NULL)
goto fail;
}
ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
&ap->evt_prd_dma);
if (ap->evt_prd == NULL)
goto fail;
ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
&ap->rx_ret_prd_dma);
if (ap->rx_ret_prd == NULL)
goto fail;
ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32),
&ap->tx_csm_dma);
if (ap->tx_csm == NULL)
goto fail;
return 0;
fail:
/* Clean up. */
ace_init_cleanup(dev);
return 1;
}
/*
* Generic cleanup handling data allocated during init. Used when the
* module is unloaded or if an error occurs during initialization
*/
static void ace_init_cleanup(struct net_device *dev)
{
struct ace_private *ap;
ap = netdev_priv(dev);
ace_free_descriptors(dev);
if (ap->info)
pci_free_consistent(ap->pdev, sizeof(struct ace_info),
ap->info, ap->info_dma);
kfree(ap->skb);
kfree(ap->trace_buf);
if (dev->irq)
free_irq(dev->irq, dev);
iounmap(ap->regs);
}
/*
* Commands are considered to be slow.
*/
static inline void ace_issue_cmd(struct ace_regs __iomem *regs, struct cmd *cmd)
{
u32 idx;
idx = readl(&regs->CmdPrd);
writel(*(u32 *)(cmd), &regs->CmdRng[idx]);
idx = (idx + 1) % CMD_RING_ENTRIES;
writel(idx, &regs->CmdPrd);
}
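/*
* Typical call pattern, mirroring the Tigon I path in
* ace_load_std_rx_ring() further down:
*
* struct cmd cmd;
* cmd.evt = C_SET_RX_PRD_IDX;
* cmd.code = 0;
* cmd.idx = ap->rx_std_skbprd;
* ace_issue_cmd(regs, &cmd);
*/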
static int __devinit ace_init(struct net_device *dev)
{
struct ace_private *ap;
struct ace_regs __iomem *regs;
struct ace_info *info = NULL;
struct pci_dev *pdev;
unsigned long myjif;
u64 tmp_ptr;
u32 tig_ver, mac1, mac2, tmp, pci_state;
int board_idx, ecode = 0;
short i;
unsigned char cache_size;
ap = netdev_priv(dev);
regs = ap->regs;
board_idx = ap->board_idx;
/*
* aman@sgi.com - it's useful to do a NIC reset here to
* address the `Firmware not running' problem subsequent
* to any crashes involving the NIC
*/
writel(HW_RESET | (HW_RESET << 24), &regs->HostCtrl);
readl(&regs->HostCtrl); /* PCI write posting */
udelay(5);
/*
* Don't access any other registers before this point!
*/
#ifdef __BIG_ENDIAN
/*
* This will most likely need BYTE_SWAP once we switch
* to using __raw_writel()
*/
writel((WORD_SWAP | CLR_INT | ((WORD_SWAP | CLR_INT) << 24)),
&regs->HostCtrl);
#else
writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)),
&regs->HostCtrl);
#endif
readl(&regs->HostCtrl); /* PCI write posting */
/*
* Stop the NIC CPU and clear pending interrupts
*/
writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
readl(&regs->CpuCtrl); /* PCI write posting */
writel(0, &regs->Mb0Lo);
tig_ver = readl(&regs->HostCtrl) >> 28;
switch(tig_ver){
#ifndef CONFIG_ACENIC_OMIT_TIGON_I
case 4:
case 5:
printk(KERN_INFO " Tigon I (Rev. %i), Firmware: %i.%i.%i, ",
tig_ver, ap->firmware_major, ap->firmware_minor,
ap->firmware_fix);
writel(0, &regs->LocalCtrl);
ap->version = 1;
ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES;
break;
#endif
case 6:
printk(KERN_INFO " Tigon II (Rev. %i), Firmware: %i.%i.%i, ",
tig_ver, ap->firmware_major, ap->firmware_minor,
ap->firmware_fix);
writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
readl(&regs->CpuBCtrl); /* PCI write posting */
/*
* The SRAM bank size does _not_ indicate the amount
* of memory on the card, it controls the _bank_ size!
* Ie. a 1MB AceNIC will have two banks of 512KB.
*/
writel(SRAM_BANK_512K, &regs->LocalCtrl);
writel(SYNC_SRAM_TIMING, &regs->MiscCfg);
ap->version = 2;
ap->tx_ring_entries = MAX_TX_RING_ENTRIES;
break;
default:
printk(KERN_WARNING " Unsupported Tigon version detected "
"(%i)\n", tig_ver);
ecode = -ENODEV;
goto init_error;
}
/*
* ModeStat _must_ be set after the SRAM settings as this change
* seems to corrupt the ModeStat and possible other registers.
* The SRAM settings survive resets and setting it to the same
* value a second time works as well. This is what caused the
* `Firmware not running' problem on the Tigon II.
*/
#ifdef __BIG_ENDIAN
writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD |
ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#else
writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL |
ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#endif
readl(&regs->ModeStat); /* PCI write posting */
mac1 = 0;
for(i = 0; i < 4; i++) {
int t;
mac1 = mac1 << 8;
t = read_eeprom_byte(dev, 0x8c+i);
if (t < 0) {
ecode = -EIO;
goto init_error;
} else
mac1 |= (t & 0xff);
}
mac2 = 0;
for(i = 4; i < 8; i++) {
int t;
mac2 = mac2 << 8;
t = read_eeprom_byte(dev, 0x8c+i);
if (t < 0) {
ecode = -EIO;
goto init_error;
} else
mac2 |= (t & 0xff);
}
writel(mac1, &regs->MacAddrHi);
writel(mac2, &regs->MacAddrLo);
dev->dev_addr[0] = (mac1 >> 8) & 0xff;
dev->dev_addr[1] = mac1 & 0xff;
dev->dev_addr[2] = (mac2 >> 24) & 0xff;
dev->dev_addr[3] = (mac2 >> 16) & 0xff;
dev->dev_addr[4] = (mac2 >> 8) & 0xff;
dev->dev_addr[5] = mac2 & 0xff;
printk("MAC: %pM\n", dev->dev_addr);
/*
* Looks like this is necessary to deal with on all architectures,
* even this %$#%$# N440BX Intel based thing doesn't get it right.
* Ie. having two NICs in the machine, one will have the cache
* line set at boot time, the other will not.
*/
pdev = ap->pdev;
pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_size);
cache_size <<= 2;
if (cache_size != SMP_CACHE_BYTES) {
printk(KERN_INFO " PCI cache line size set incorrectly "
"(%i bytes) by BIOS/FW, ", cache_size);
if (cache_size > SMP_CACHE_BYTES)
printk("expecting %i\n", SMP_CACHE_BYTES);
else {
printk("correcting to %i\n", SMP_CACHE_BYTES);
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
SMP_CACHE_BYTES >> 2);
}
}
pci_state = readl(&regs->PciState);
printk(KERN_INFO " PCI bus width: %i bits, speed: %iMHz, "
"latency: %i clks\n",
(pci_state & PCI_32BIT) ? 32 : 64,
(pci_state & PCI_66MHZ) ? 66 : 33,
ap->pci_latency);
/*
* Set the max DMA transfer size. Seems that for most systems
* the performance is better when no MAX parameter is
* set. However for systems enabling PCI write and invalidate,
* DMA writes must be set to the L1 cache line size to get
* optimal performance.
*
* The default is now to turn the PCI write and invalidate off
* - that is what Alteon does for NT.
*/
tmp = READ_CMD_MEM | WRITE_CMD_MEM;
if (ap->version >= 2) {
tmp |= (MEM_READ_MULTIPLE | (pci_state & PCI_66MHZ));
/*
* Tuning parameters only supported for 8 cards
*/
if (board_idx == BOARD_IDX_OVERFLOW ||
dis_pci_mem_inval[board_idx]) {
if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
pci_write_config_word(pdev, PCI_COMMAND,
ap->pci_command);
printk(KERN_INFO " Disabling PCI memory "
"write and invalidate\n");
}
} else if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
printk(KERN_INFO " PCI memory write & invalidate "
"enabled by BIOS, enabling counter measures\n");
switch(SMP_CACHE_BYTES) {
case 16:
tmp |= DMA_WRITE_MAX_16;
break;
case 32:
tmp |= DMA_WRITE_MAX_32;
break;
case 64:
tmp |= DMA_WRITE_MAX_64;
break;
case 128:
tmp |= DMA_WRITE_MAX_128;
break;
default:
printk(KERN_INFO " Cache line size %i not "
"supported, PCI write and invalidate "
"disabled\n", SMP_CACHE_BYTES);
ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
pci_write_config_word(pdev, PCI_COMMAND,
ap->pci_command);
}
}
}
#ifdef __sparc__
/*
* On this platform, we know what the best dma settings
* are. We use 64-byte maximum bursts, because if we
* burst larger than the cache line size (or even cross
* a 64byte boundary in a single burst) the UltraSparc
* PCI controller will disconnect at 64-byte multiples.
*
* Read-multiple will be properly enabled above, and when
* set will give the PCI controller proper hints about
* prefetching.
*/
tmp &= ~DMA_READ_WRITE_MASK;
tmp |= DMA_READ_MAX_64;
tmp |= DMA_WRITE_MAX_64;
#endif
#ifdef __alpha__
tmp &= ~DMA_READ_WRITE_MASK;
tmp |= DMA_READ_MAX_128;
/*
* All the docs say MUST NOT. Well, I did.
* Nothing terrible happens, if we load wrong size.
* Bit w&i still works better!
*/
tmp |= DMA_WRITE_MAX_128;
#endif
writel(tmp, &regs->PciState);
#if 0
/*
* The Host PCI bus controller driver has to set FBB.
* If all devices on that PCI bus support FBB, then the controller
* can enable FBB support in the Host PCI Bus controller (or on
* the PCI-PCI bridge if that applies).
* -ggg
*/
/*
* I have received reports from people having problems when this
* bit is enabled.
*/
if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) {
printk(KERN_INFO " Enabling PCI Fast Back to Back\n");
ap->pci_command |= PCI_COMMAND_FAST_BACK;
pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command);
}
#endif
/*
* Configure DMA attributes.
*/
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
ap->pci_using_dac = 1;
} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
ap->pci_using_dac = 0;
} else {
ecode = -ENODEV;
goto init_error;
}
/*
* Initialize the generic info block and the command+event rings
* and the control blocks for the transmit and receive rings
* as they need to be setup once and for all.
*/
if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info),
&ap->info_dma))) {
ecode = -EAGAIN;
goto init_error;
}
ap->info = info;
/*
* Get the memory for the skb rings.
*/
if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
ecode = -EAGAIN;
goto init_error;
}
ecode = request_irq(pdev->irq, ace_interrupt, IRQF_SHARED,
DRV_NAME, dev);
if (ecode) {
printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
DRV_NAME, pdev->irq);
goto init_error;
} else
dev->irq = pdev->irq;
#ifdef INDEX_DEBUG
spin_lock_init(&ap->debug_lock);
ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1;
ap->last_std_rx = 0;
ap->last_mini_rx = 0;
#endif
memset(ap->info, 0, sizeof(struct ace_info));
memset(ap->skb, 0, sizeof(struct ace_skb));
ecode = ace_load_firmware(dev);
if (ecode)
goto init_error;
ap->fw_running = 0;
tmp_ptr = ap->info_dma;
writel(tmp_ptr >> 32, &regs->InfoPtrHi);
writel(tmp_ptr & 0xffffffff, &regs->InfoPtrLo);
memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));
set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma);
info->evt_ctrl.flags = 0;
*(ap->evt_prd) = 0;
wmb();
set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma);
writel(0, &regs->EvtCsm);
set_aceaddr(&info->cmd_ctrl.rngptr, 0x100);
info->cmd_ctrl.flags = 0;
info->cmd_ctrl.max_len = 0;
for (i = 0; i < CMD_RING_ENTRIES; i++)
writel(0, &regs->CmdRng[i]);
writel(0, &regs->CmdPrd);
writel(0, &regs->CmdCsm);
tmp_ptr = ap->info_dma;
tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats);
set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);
set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE;
info->rx_std_ctrl.flags =
RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
memset(ap->rx_std_ring, 0,
RX_STD_RING_ENTRIES * sizeof(struct rx_desc));
for (i = 0; i < RX_STD_RING_ENTRIES; i++)
ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM;
ap->rx_std_skbprd = 0;
atomic_set(&ap->cur_rx_bufs, 0);
set_aceaddr(&info->rx_jumbo_ctrl.rngptr,
(ap->rx_ring_base_dma +
(sizeof(struct rx_desc) * RX_STD_RING_ENTRIES)));
info->rx_jumbo_ctrl.max_len = 0;
info->rx_jumbo_ctrl.flags =
RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
memset(ap->rx_jumbo_ring, 0,
RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc));
for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++)
ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO;
ap->rx_jumbo_skbprd = 0;
atomic_set(&ap->cur_jumbo_bufs, 0);
memset(ap->rx_mini_ring, 0,
RX_MINI_RING_ENTRIES * sizeof(struct rx_desc));
if (ap->version >= 2) {
set_aceaddr(&info->rx_mini_ctrl.rngptr,
(ap->rx_ring_base_dma +
(sizeof(struct rx_desc) *
(RX_STD_RING_ENTRIES +
RX_JUMBO_RING_ENTRIES))));
info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
info->rx_mini_ctrl.flags =
RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|RCB_FLG_VLAN_ASSIST;
for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
ap->rx_mini_ring[i].flags =
BD_FLG_TCP_UDP_SUM | BD_FLG_MINI;
} else {
set_aceaddr(&info->rx_mini_ctrl.rngptr, 0);
info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE;
info->rx_mini_ctrl.max_len = 0;
}
ap->rx_mini_skbprd = 0;
atomic_set(&ap->cur_mini_bufs, 0);
set_aceaddr(&info->rx_return_ctrl.rngptr,
(ap->rx_ring_base_dma +
(sizeof(struct rx_desc) *
(RX_STD_RING_ENTRIES +
RX_JUMBO_RING_ENTRIES +
RX_MINI_RING_ENTRIES))));
info->rx_return_ctrl.flags = 0;
info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES;
memset(ap->rx_return_ring, 0,
RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc));
set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma);
*(ap->rx_ret_prd) = 0;
writel(TX_RING_BASE, &regs->WinBase);
if (ACE_IS_TIGON_I(ap)) {
ap->tx_ring = (__force struct tx_desc *) regs->Window;
for (i = 0; i < (TIGON_I_TX_RING_ENTRIES
* sizeof(struct tx_desc)) / sizeof(u32); i++)
writel(0, (__force void __iomem *)ap->tx_ring + i * 4);
set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE);
} else {
memset(ap->tx_ring, 0,
MAX_TX_RING_ENTRIES * sizeof(struct tx_desc));
set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma);
}
info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap);
tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
/*
* The Tigon I does not like having the TX ring in host memory ;-(
*/
if (!ACE_IS_TIGON_I(ap))
tmp |= RCB_FLG_TX_HOST_RING;
#if TX_COAL_INTS_ONLY
tmp |= RCB_FLG_COAL_INT_ONLY;
#endif
info->tx_ctrl.flags = tmp;
set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma);
/*
* Potential item for tuning parameter
*/
#if 0 /* NO */
writel(DMA_THRESH_16W, &regs->DmaReadCfg);
writel(DMA_THRESH_16W, &regs->DmaWriteCfg);
#else
writel(DMA_THRESH_8W, &regs->DmaReadCfg);
writel(DMA_THRESH_8W, &regs->DmaWriteCfg);
#endif
writel(0, &regs->MaskInt);
writel(1, &regs->IfIdx);
#if 0
/*
* McKinley boxes do not like us fiddling with AssistState
* this early
*/
writel(1, &regs->AssistState);
#endif
writel(DEF_STAT, &regs->TuneStatTicks);
writel(DEF_TRACE, &regs->TuneTrace);
ace_set_rxtx_parms(dev, 0);
if (board_idx == BOARD_IDX_OVERFLOW) {
printk(KERN_WARNING "%s: more than %i NICs detected, "
"ignoring module parameters!\n",
ap->name, ACE_MAX_MOD_PARMS);
} else if (board_idx >= 0) {
if (tx_coal_tick[board_idx])
writel(tx_coal_tick[board_idx],
&regs->TuneTxCoalTicks);
if (max_tx_desc[board_idx])
writel(max_tx_desc[board_idx], &regs->TuneMaxTxDesc);
if (rx_coal_tick[board_idx])
writel(rx_coal_tick[board_idx],
&regs->TuneRxCoalTicks);
if (max_rx_desc[board_idx])
writel(max_rx_desc[board_idx], &regs->TuneMaxRxDesc);
if (trace[board_idx])
writel(trace[board_idx], &regs->TuneTrace);
if ((tx_ratio[board_idx] > 0) && (tx_ratio[board_idx] < 64))
writel(tx_ratio[board_idx], &regs->TxBufRat);
}
/*
* Default link parameters
*/
tmp = LNK_ENABLE | LNK_FULL_DUPLEX | LNK_1000MB | LNK_100MB |
LNK_10MB | LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL | LNK_NEGOTIATE;
if(ap->version >= 2)
tmp |= LNK_TX_FLOW_CTL_Y;
/*
* Override link default parameters
*/
if ((board_idx >= 0) && link_state[board_idx]) {
int option = link_state[board_idx];
tmp = LNK_ENABLE;
if (option & 0x01) {
printk(KERN_INFO "%s: Setting half duplex link\n",
ap->name);
tmp &= ~LNK_FULL_DUPLEX;
}
if (option & 0x02)
tmp &= ~LNK_NEGOTIATE;
if (option & 0x10)
tmp |= LNK_10MB;
if (option & 0x20)
tmp |= LNK_100MB;
if (option & 0x40)
tmp |= LNK_1000MB;
if ((option & 0x70) == 0) {
printk(KERN_WARNING "%s: No media speed specified, "
"forcing auto negotiation\n", ap->name);
tmp |= LNK_NEGOTIATE | LNK_1000MB |
LNK_100MB | LNK_10MB;
}
if ((option & 0x100) == 0)
tmp |= LNK_NEG_FCTL;
else
printk(KERN_INFO "%s: Disabling flow control "
"negotiation\n", ap->name);
if (option & 0x200)
tmp |= LNK_RX_FLOW_CTL_Y;
if ((option & 0x400) && (ap->version >= 2)) {
printk(KERN_INFO "%s: Enabling TX flow control\n",
ap->name);
tmp |= LNK_TX_FLOW_CTL_Y;
}
}
ap->link = tmp;
writel(tmp, &regs->TuneLink);
if (ap->version >= 2)
writel(tmp, &regs->TuneFastLink);
writel(ap->firmware_start, &regs->Pc);
writel(0, &regs->Mb0Lo);
/*
* Set tx_csm before we start receiving interrupts, otherwise
* the interrupt handler might think it is supposed to process
* tx ints before we are up and running, which may cause a null
* pointer access in the int handler.
*/
ap->cur_rx = 0;
ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0;
wmb();
ace_set_txprd(regs, ap, 0);
writel(0, &regs->RxRetCsm);
/*
* Enable DMA engine now.
* If we do this sooner, Mckinley box pukes.
* I assume it's because Tigon II DMA engine wants to check
* *something* even before the CPU is started.
*/
writel(1, &regs->AssistState); /* enable DMA */
/*
* Start the NIC CPU
*/
writel(readl(&regs->CpuCtrl) & ~(CPU_HALT|CPU_TRACE), &regs->CpuCtrl);
readl(&regs->CpuCtrl);
/*
* Wait for the firmware to spin up - max 3 seconds.
*/
myjif = jiffies + 3 * HZ;
while (time_before(jiffies, myjif) && !ap->fw_running)
cpu_relax();
if (!ap->fw_running) {
printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name);
ace_dump_trace(ap);
writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
readl(&regs->CpuCtrl);
/* aman@sgi.com - account for badly behaving firmware/NIC:
* - have observed that the NIC may continue to generate
* interrupts for some reason; attempt to stop it - halt
* second CPU for Tigon II cards, and also clear Mb0
* - if we're a module, we'll fail to load if this was
* the only GbE card in the system => if the kernel does
* see an interrupt from the NIC, code to handle it is
* gone and OOps! - so free_irq also
*/
if (ap->version >= 2)
writel(readl(&regs->CpuBCtrl) | CPU_HALT,
&regs->CpuBCtrl);
writel(0, &regs->Mb0Lo);
readl(&regs->Mb0Lo);
ecode = -EBUSY;
goto init_error;
}
/*
* We load the ring here as there seems to be no way to tell the
* firmware to wipe the ring without re-initializing it.
*/
if (!test_and_set_bit(0, &ap->std_refill_busy))
ace_load_std_rx_ring(dev, RX_RING_SIZE);
else
printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
ap->name);
if (ap->version >= 2) {
if (!test_and_set_bit(0, &ap->mini_refill_busy))
ace_load_mini_rx_ring(dev, RX_MINI_SIZE);
else
printk(KERN_ERR "%s: Someone is busy refilling "
"the RX mini ring\n", ap->name);
}
return 0;
init_error:
ace_init_cleanup(dev);
return ecode;
}
static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
int board_idx = ap->board_idx;
if (board_idx >= 0) {
if (!jumbo) {
if (!tx_coal_tick[board_idx])
writel(DEF_TX_COAL, &regs->TuneTxCoalTicks);
if (!max_tx_desc[board_idx])
writel(DEF_TX_MAX_DESC, &regs->TuneMaxTxDesc);
if (!rx_coal_tick[board_idx])
writel(DEF_RX_COAL, &regs->TuneRxCoalTicks);
if (!max_rx_desc[board_idx])
writel(DEF_RX_MAX_DESC, &regs->TuneMaxRxDesc);
if (!tx_ratio[board_idx])
writel(DEF_TX_RATIO, &regs->TxBufRat);
} else {
if (!tx_coal_tick[board_idx])
writel(DEF_JUMBO_TX_COAL,
&regs->TuneTxCoalTicks);
if (!max_tx_desc[board_idx])
writel(DEF_JUMBO_TX_MAX_DESC,
&regs->TuneMaxTxDesc);
if (!rx_coal_tick[board_idx])
writel(DEF_JUMBO_RX_COAL,
&regs->TuneRxCoalTicks);
if (!max_rx_desc[board_idx])
writel(DEF_JUMBO_RX_MAX_DESC,
&regs->TuneMaxRxDesc);
if (!tx_ratio[board_idx])
writel(DEF_JUMBO_TX_RATIO, &regs->TxBufRat);
}
}
}
static void ace_watchdog(struct net_device *data)
{
struct net_device *dev = data;
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
/*
* We haven't received a stats update event for more than 2.5
* seconds and there is data in the transmit queue, thus we
* assume the card is stuck.
*/
if (*ap->tx_csm != ap->tx_ret_csm) {
printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n",
dev->name, (unsigned int)readl(&regs->HostCtrl));
/* This can happen due to ieee flow control. */
} else {
printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n",
dev->name);
#if 0
netif_wake_queue(dev);
#endif
}
}
static void ace_tasklet(unsigned long arg)
{
struct net_device *dev = (struct net_device *) arg;
struct ace_private *ap = netdev_priv(dev);
int cur_size;
cur_size = atomic_read(&ap->cur_rx_bufs);
if ((cur_size < RX_LOW_STD_THRES) &&
!test_and_set_bit(0, &ap->std_refill_busy)) {
#ifdef DEBUG
printk("refilling buffers (current %i)\n", cur_size);
#endif
ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size);
}
if (ap->version >= 2) {
cur_size = atomic_read(&ap->cur_mini_bufs);
if ((cur_size < RX_LOW_MINI_THRES) &&
!test_and_set_bit(0, &ap->mini_refill_busy)) {
#ifdef DEBUG
printk("refilling mini buffers (current %i)\n",
cur_size);
#endif
ace_load_mini_rx_ring(dev, RX_MINI_SIZE - cur_size);
}
}
cur_size = atomic_read(&ap->cur_jumbo_bufs);
if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) &&
!test_and_set_bit(0, &ap->jumbo_refill_busy)) {
#ifdef DEBUG
printk("refilling jumbo buffers (current %i)\n", cur_size);
#endif
ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
}
ap->tasklet_pending = 0;
}
/*
* Copy the contents of the NIC's trace buffer to kernel memory.
*/
static void ace_dump_trace(struct ace_private *ap)
{
#if 0
if (!ap->trace_buf)
if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL)))
return;
#endif
}
/*
* Load the standard rx ring.
*
* Loading rings is safe without holding the spin lock since this is
* done only before the device is enabled, thus no interrupts are
* generated and the rings are not touched by the interrupt
* handler or the tasklet.
*/
static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
short i, idx;
prefetchw(&ap->cur_rx_bufs);
idx = ap->rx_std_skbprd;
for (i = 0; i < nr_bufs; i++) {
struct sk_buff *skb;
struct rx_desc *rd;
dma_addr_t mapping;
skb = netdev_alloc_skb_ip_align(dev, ACE_STD_BUFSIZE);
if (!skb)
break;
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
ACE_STD_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_std_skbuff[idx].skb = skb;
dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
mapping, mapping);
rd = &ap->rx_std_ring[idx];
set_aceaddr(&rd->addr, mapping);
rd->size = ACE_STD_BUFSIZE;
rd->idx = idx;
idx = (idx + 1) % RX_STD_RING_ENTRIES;
}
if (!i)
goto error_out;
atomic_add(i, &ap->cur_rx_bufs);
ap->rx_std_skbprd = idx;
if (ACE_IS_TIGON_I(ap)) {
struct cmd cmd;
cmd.evt = C_SET_RX_PRD_IDX;
cmd.code = 0;
cmd.idx = ap->rx_std_skbprd;
ace_issue_cmd(regs, &cmd);
} else {
writel(idx, &regs->RxStdPrd);
wmb();
}
out:
clear_bit(0, &ap->std_refill_busy);
return;
error_out:
printk(KERN_INFO "Out of memory when allocating "
"standard receive buffers\n");
goto out;
}
static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
short i, idx;
prefetchw(&ap->cur_mini_bufs);
idx = ap->rx_mini_skbprd;
for (i = 0; i < nr_bufs; i++) {
struct sk_buff *skb;
struct rx_desc *rd;
dma_addr_t mapping;
skb = netdev_alloc_skb_ip_align(dev, ACE_MINI_BUFSIZE);
if (!skb)
break;
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
ACE_MINI_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_mini_skbuff[idx].skb = skb;
dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
mapping, mapping);
rd = &ap->rx_mini_ring[idx];
set_aceaddr(&rd->addr, mapping);
rd->size = ACE_MINI_BUFSIZE;
rd->idx = idx;
idx = (idx + 1) % RX_MINI_RING_ENTRIES;
}
if (!i)
goto error_out;
atomic_add(i, &ap->cur_mini_bufs);
ap->rx_mini_skbprd = idx;
writel(idx, &regs->RxMiniPrd);
wmb();
out:
clear_bit(0, &ap->mini_refill_busy);
return;
error_out:
printk(KERN_INFO "Out of memory when allocating "
"mini receive buffers\n");
goto out;
}
/*
* Load the jumbo rx ring, this may happen at any time if the MTU
* is changed to a value > 1500.
*/
static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
short i, idx;
idx = ap->rx_jumbo_skbprd;
for (i = 0; i < nr_bufs; i++) {
struct sk_buff *skb;
struct rx_desc *rd;
dma_addr_t mapping;
skb = netdev_alloc_skb_ip_align(dev, ACE_JUMBO_BUFSIZE);
if (!skb)
break;
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
ACE_JUMBO_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_jumbo_skbuff[idx].skb = skb;
dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
mapping, mapping);
rd = &ap->rx_jumbo_ring[idx];
set_aceaddr(&rd->addr, mapping);
rd->size = ACE_JUMBO_BUFSIZE;
rd->idx = idx;
idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
}
if (!i)
goto error_out;
atomic_add(i, &ap->cur_jumbo_bufs);
ap->rx_jumbo_skbprd = idx;
if (ACE_IS_TIGON_I(ap)) {
struct cmd cmd;
cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
cmd.code = 0;
cmd.idx = ap->rx_jumbo_skbprd;
ace_issue_cmd(regs, &cmd);
} else {
writel(idx, &regs->RxJumboPrd);
wmb();
}
out:
clear_bit(0, &ap->jumbo_refill_busy);
return;
error_out:
if (net_ratelimit())
printk(KERN_INFO "Out of memory when allocating "
"jumbo receive buffers\n");
goto out;
}
/*
* All events are considered to be slow (RX/TX ints do not generate
* events) and are handled here, outside the main interrupt handler,
* to reduce the size of the handler.
*/
static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
{
struct ace_private *ap;
ap = netdev_priv(dev);
while (evtcsm != evtprd) {
switch (ap->evt_ring[evtcsm].evt) {
case E_FW_RUNNING:
printk(KERN_INFO "%s: Firmware up and running\n",
ap->name);
ap->fw_running = 1;
wmb();
break;
case E_STATS_UPDATED:
break;
case E_LNK_STATE:
{
u16 code = ap->evt_ring[evtcsm].code;
switch (code) {
case E_C_LINK_UP:
{
u32 state = readl(&ap->regs->GigLnkState);
printk(KERN_WARNING "%s: Optical link UP "
"(%s Duplex, Flow Control: %s%s)\n",
ap->name,
state & LNK_FULL_DUPLEX ? "Full":"Half",
state & LNK_TX_FLOW_CTL_Y ? "TX " : "",
state & LNK_RX_FLOW_CTL_Y ? "RX" : "");
break;
}
case E_C_LINK_DOWN:
printk(KERN_WARNING "%s: Optical link DOWN\n",
ap->name);
break;
case E_C_LINK_10_100:
printk(KERN_WARNING "%s: 10/100BaseT link "
"UP\n", ap->name);
break;
default:
printk(KERN_ERR "%s: Unknown optical link "
"state %02x\n", ap->name, code);
}
break;
}
case E_ERROR:
switch (ap->evt_ring[evtcsm].code) {
case E_C_ERR_INVAL_CMD:
printk(KERN_ERR "%s: invalid command error\n",
ap->name);
break;
case E_C_ERR_UNIMP_CMD:
printk(KERN_ERR "%s: unimplemented command "
"error\n", ap->name);
break;
case E_C_ERR_BAD_CFG:
printk(KERN_ERR "%s: bad config error\n",
ap->name);
break;
default:
printk(KERN_ERR "%s: unknown error %02x\n",
ap->name, ap->evt_ring[evtcsm].code);
}
break;
case E_RESET_JUMBO_RNG:
{
int i;
for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
if (ap->skb->rx_jumbo_skbuff[i].skb) {
ap->rx_jumbo_ring[i].size = 0;
set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0);
dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
ap->skb->rx_jumbo_skbuff[i].skb = NULL;
}
}
if (ACE_IS_TIGON_I(ap)) {
struct cmd cmd;
cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
cmd.code = 0;
cmd.idx = 0;
ace_issue_cmd(ap->regs, &cmd);
} else {
writel(0, &((ap->regs)->RxJumboPrd));
wmb();
}
ap->jumbo = 0;
ap->rx_jumbo_skbprd = 0;
printk(KERN_INFO "%s: Jumbo ring flushed\n",
ap->name);
clear_bit(0, &ap->jumbo_refill_busy);
break;
}
default:
printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
ap->name, ap->evt_ring[evtcsm].evt);
}
evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
}
return evtcsm;
}
static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
{
struct ace_private *ap = netdev_priv(dev);
u32 idx;
int mini_count = 0, std_count = 0;
idx = rxretcsm;
prefetchw(&ap->cur_rx_bufs);
prefetchw(&ap->cur_mini_bufs);
while (idx != rxretprd) {
struct ring_info *rip;
struct sk_buff *skb;
struct rx_desc *rxdesc, *retdesc;
u32 skbidx;
int bd_flags, desc_type, mapsize;
u16 csum;
/* make sure the rx descriptor isn't read before rxretprd */
if (idx == rxretcsm)
rmb();
retdesc = &ap->rx_return_ring[idx];
skbidx = retdesc->idx;
bd_flags = retdesc->flags;
desc_type = bd_flags & (BD_FLG_JUMBO | BD_FLG_MINI);
switch (desc_type) {
/*
* Normal frames do not have any flags set
*
* Mini and normal frames arrive frequently,
* so use a local counter to avoid doing
* atomic operations for each packet arriving.
*/
case 0:
rip = &ap->skb->rx_std_skbuff[skbidx];
mapsize = ACE_STD_BUFSIZE;
rxdesc = &ap->rx_std_ring[skbidx];
std_count++;
break;
case BD_FLG_JUMBO:
rip = &ap->skb->rx_jumbo_skbuff[skbidx];
mapsize = ACE_JUMBO_BUFSIZE;
rxdesc = &ap->rx_jumbo_ring[skbidx];
atomic_dec(&ap->cur_jumbo_bufs);
break;
case BD_FLG_MINI:
rip = &ap->skb->rx_mini_skbuff[skbidx];
mapsize = ACE_MINI_BUFSIZE;
rxdesc = &ap->rx_mini_ring[skbidx];
mini_count++;
break;
default:
printk(KERN_INFO "%s: unknown frame type (0x%02x) "
"returned by NIC\n", dev->name,
retdesc->flags);
goto error;
}
skb = rip->skb;
rip->skb = NULL;
pci_unmap_page(ap->pdev,
dma_unmap_addr(rip, mapping),
mapsize,
PCI_DMA_FROMDEVICE);
skb_put(skb, retdesc->size);
/*
* Fly baby, fly!
*/
csum = retdesc->tcp_udp_csum;
skb->protocol = eth_type_trans(skb, dev);
/*
* Instead of forcing the poor tigon mips cpu to calculate
* pseudo hdr checksum, we do this ourselves.
*/
if (bd_flags & BD_FLG_TCP_UDP_SUM) {
skb->csum = htons(csum);
skb->ip_summed = CHECKSUM_COMPLETE;
} else {
skb_checksum_none_assert(skb);
}
/* send it up */
if ((bd_flags & BD_FLG_VLAN_TAG))
__vlan_hwaccel_put_tag(skb, retdesc->vlan);
netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += retdesc->size;
idx = (idx + 1) % RX_RETURN_RING_ENTRIES;
}
atomic_sub(std_count, &ap->cur_rx_bufs);
if (!ACE_IS_TIGON_I(ap))
atomic_sub(mini_count, &ap->cur_mini_bufs);
out:
/*
* According to the documentation RxRetCsm is obsolete with
* the 12.3.x Firmware - my Tigon I NICs seem to disagree!
*/
if (ACE_IS_TIGON_I(ap)) {
writel(idx, &ap->regs->RxRetCsm);
}
ap->cur_rx = idx;
return;
error:
idx = rxretprd;
goto out;
}
static inline void ace_tx_int(struct net_device *dev,
u32 txcsm, u32 idx)
{
struct ace_private *ap = netdev_priv(dev);
do {
struct sk_buff *skb;
struct tx_ring_info *info;
info = ap->skb->tx_skbuff + idx;
skb = info->skb;
if (dma_unmap_len(info, maplen)) {
pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
dma_unmap_len(info, maplen),
PCI_DMA_TODEVICE);
dma_unmap_len_set(info, maplen, 0);
}
if (skb) {
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
dev_kfree_skb_irq(skb);
info->skb = NULL;
}
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
} while (idx != txcsm);
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
wmb();
ap->tx_ret_csm = txcsm;
/* So... tx_ret_csm is advanced _after_ check for device wakeup.
*
* We could try to make it before. In this case we would get
* the following race condition: hard_start_xmit on other cpu
* enters after we advanced tx_ret_csm and fills space,
* which we have just freed, so that we make illegal device wakeup.
* There is no good way to work around this (the check at entry
* to ace_start_xmit detects the condition and prevents
* ring corruption, but it is not a good workaround.)
*
* When tx_ret_csm is advanced after, we wake up device _only_
* if we really have some space in ring (though the core doing
* hard_start_xmit can see full ring for some period and has to
* synchronize.) Superb.
* BUT! We get another subtle race condition. hard_start_xmit
* may think that ring is full between wakeup and advancing
* tx_ret_csm and will stop device instantly! It is not so bad.
* We are guaranteed that there is something in ring, so that
* the next irq will resume transmission. To speedup this we could
* mark descriptor, which closes ring with BD_FLG_COAL_NOW
* (see ace_start_xmit).
*
* Well, this dilemma exists in all lock-free devices.
* We, following scheme used in drivers by Donald Becker,
* select the least dangerous.
* --ANK
*/
}
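/*
 * An illustrative timeline for the ordering discussed above (added
 * commentary, not part of the original driver). With the chosen order,
 * waking the queue _before_ advancing tx_ret_csm:
 *
 *   CPU0 (ace_tx_int)                  CPU1 (ace_start_xmit)
 *   -----------------                  ---------------------
 *   frees descriptors
 *   netif_wake_queue(dev)
 *                                      sees ring "full" (tx_ret_csm
 *                                      not yet advanced), stops queue
 *   wmb(); ap->tx_ret_csm = txcsm
 *                                      next TX-complete irq wakes it
 *
 * The transient stall is harmless: completed descriptors are already
 * in the ring, so another interrupt is guaranteed to arrive.
 */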
static irqreturn_t ace_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
u32 idx;
u32 txcsm, rxretcsm, rxretprd;
u32 evtcsm, evtprd;
/*
* In case of PCI shared interrupts or spurious interrupts,
* we want to make sure it is actually our interrupt before
* spending any time in here.
*/
if (!(readl(&regs->HostCtrl) & IN_INT))
return IRQ_NONE;
/*
* ACK intr now. Otherwise we will lose updates to rx_ret_prd,
* which happened _after_ rxretprd = *ap->rx_ret_prd; but before
* writel(0, &regs->Mb0Lo).
*
* "IRQ avoidance" recommended in docs applies to IRQs served
* by threads and it is wrong even for that case.
*/
writel(0, &regs->Mb0Lo);
readl(&regs->Mb0Lo);
/*
* There is no conflict between transmit handling in
* start_xmit and receive processing, thus there is no reason
* to take a spin lock for RX handling. Wait until we start
* working on the other stuff - hey we don't need a spin lock
* anymore.
*/
rxretprd = *ap->rx_ret_prd;
rxretcsm = ap->cur_rx;
if (rxretprd != rxretcsm)
ace_rx_int(dev, rxretprd, rxretcsm);
txcsm = *ap->tx_csm;
idx = ap->tx_ret_csm;
if (txcsm != idx) {
/*
* If each skb takes only one descriptor this check degenerates
* to identity, because new space has just been opened.
* But if skbs are fragmented we must check that this index
* update releases enough of space, otherwise we just
* wait for device to make more work.
*/
if (!tx_ring_full(ap, txcsm, ap->tx_prd))
ace_tx_int(dev, txcsm, idx);
}
evtcsm = readl(&regs->EvtCsm);
evtprd = *ap->evt_prd;
if (evtcsm != evtprd) {
evtcsm = ace_handle_event(dev, evtcsm, evtprd);
writel(evtcsm, &regs->EvtCsm);
}
/*
* This has to go last in the interrupt handler and run with
* the spin lock released ... what lock?
*/
if (netif_running(dev)) {
int cur_size;
int run_tasklet = 0;
cur_size = atomic_read(&ap->cur_rx_bufs);
if (cur_size < RX_LOW_STD_THRES) {
if ((cur_size < RX_PANIC_STD_THRES) &&
!test_and_set_bit(0, &ap->std_refill_busy)) {
#ifdef DEBUG
printk("low on std buffers %i\n", cur_size);
#endif
ace_load_std_rx_ring(dev,
RX_RING_SIZE - cur_size);
} else
run_tasklet = 1;
}
if (!ACE_IS_TIGON_I(ap)) {
cur_size = atomic_read(&ap->cur_mini_bufs);
if (cur_size < RX_LOW_MINI_THRES) {
if ((cur_size < RX_PANIC_MINI_THRES) &&
!test_and_set_bit(0,
&ap->mini_refill_busy)) {
#ifdef DEBUG
printk("low on mini buffers %i\n",
cur_size);
#endif
ace_load_mini_rx_ring(dev,
RX_MINI_SIZE - cur_size);
} else
run_tasklet = 1;
}
}
if (ap->jumbo) {
cur_size = atomic_read(&ap->cur_jumbo_bufs);
if (cur_size < RX_LOW_JUMBO_THRES) {
if ((cur_size < RX_PANIC_JUMBO_THRES) &&
!test_and_set_bit(0,
&ap->jumbo_refill_busy)){
#ifdef DEBUG
printk("low on jumbo buffers %i\n",
cur_size);
#endif
ace_load_jumbo_rx_ring(dev,
RX_JUMBO_SIZE - cur_size);
} else
run_tasklet = 1;
}
}
if (run_tasklet && !ap->tasklet_pending) {
ap->tasklet_pending = 1;
tasklet_schedule(&ap->ace_tasklet);
}
}
return IRQ_HANDLED;
}
static int ace_open(struct net_device *dev)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
struct cmd cmd;
if (!(ap->fw_running)) {
printk(KERN_WARNING "%s: Firmware not running!\n", dev->name);
return -EBUSY;
}
writel(dev->mtu + ETH_HLEN + 4, &regs->IfMtu);
cmd.evt = C_CLEAR_STATS;
cmd.code = 0;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
cmd.evt = C_HOST_STATE;
cmd.code = C_C_STACK_UP;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
if (ap->jumbo &&
!test_and_set_bit(0, &ap->jumbo_refill_busy))
ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
if (dev->flags & IFF_PROMISC) {
cmd.evt = C_SET_PROMISC_MODE;
cmd.code = C_C_PROMISC_ENABLE;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
ap->promisc = 1;
} else
ap->promisc = 0;
ap->mcast_all = 0;
#if 0
cmd.evt = C_LNK_NEGOTIATION;
cmd.code = 0;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
#endif
netif_start_queue(dev);
/*
* Setup the bottom half rx ring refill handler
*/
tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev);
return 0;
}
static int ace_close(struct net_device *dev)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
struct cmd cmd;
unsigned long flags;
short i;
/*
* Without (or before) releasing irq and stopping hardware, this
* is an absolute non-sense, by the way. It will be reset instantly
* by the first irq.
*/
netif_stop_queue(dev);
if (ap->promisc) {
cmd.evt = C_SET_PROMISC_MODE;
cmd.code = C_C_PROMISC_DISABLE;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
ap->promisc = 0;
}
cmd.evt = C_HOST_STATE;
cmd.code = C_C_STACK_DOWN;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
tasklet_kill(&ap->ace_tasklet);
/*
* Make sure one CPU is not processing packets while
* buffers are being released by another.
*/
local_irq_save(flags);
ace_mask_irq(dev);
for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
struct sk_buff *skb;
struct tx_ring_info *info;
info = ap->skb->tx_skbuff + i;
skb = info->skb;
if (dma_unmap_len(info, maplen)) {
if (ACE_IS_TIGON_I(ap)) {
/* NB: TIGON_1 is special, tx_ring is in io space */
struct tx_desc __iomem *tx;
tx = (__force struct tx_desc __iomem *) &ap->tx_ring[i];
writel(0, &tx->addr.addrhi);
writel(0, &tx->addr.addrlo);
writel(0, &tx->flagsize);
} else
memset(ap->tx_ring + i, 0,
sizeof(struct tx_desc));
pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
dma_unmap_len(info, maplen),
PCI_DMA_TODEVICE);
dma_unmap_len_set(info, maplen, 0);
}
if (skb) {
dev_kfree_skb(skb);
info->skb = NULL;
}
}
if (ap->jumbo) {
cmd.evt = C_RESET_JUMBO_RNG;
cmd.code = 0;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
}
ace_unmask_irq(dev);
local_irq_restore(flags);
return 0;
}
static inline dma_addr_t
ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
struct sk_buff *tail, u32 idx)
{
dma_addr_t mapping;
struct tx_ring_info *info;
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
skb->len, PCI_DMA_TODEVICE);
info = ap->skb->tx_skbuff + idx;
info->skb = tail;
dma_unmap_addr_set(info, mapping, mapping);
dma_unmap_len_set(info, maplen, skb->len);
return mapping;
}
static inline void
ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr,
u32 flagsize, u32 vlan_tag)
{
#if !USE_TX_COAL_NOW
flagsize &= ~BD_FLG_COAL_NOW;
#endif
if (ACE_IS_TIGON_I(ap)) {
struct tx_desc __iomem *io = (__force struct tx_desc __iomem *) desc;
writel(addr >> 32, &io->addr.addrhi);
writel(addr & 0xffffffff, &io->addr.addrlo);
writel(flagsize, &io->flagsize);
writel(vlan_tag, &io->vlanres);
} else {
desc->addr.addrhi = addr >> 32;
desc->addr.addrlo = addr;
desc->flagsize = flagsize;
desc->vlanres = vlan_tag;
}
}
static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
struct tx_desc *desc;
u32 idx, flagsize;
unsigned long maxjiff = jiffies + 3*HZ;
restart:
idx = ap->tx_prd;
if (tx_ring_full(ap, ap->tx_ret_csm, idx))
goto overflow;
if (!skb_shinfo(skb)->nr_frags) {
dma_addr_t mapping;
u32 vlan_tag = 0;
mapping = ace_map_tx_skb(ap, skb, skb, idx);
flagsize = (skb->len << 16) | (BD_FLG_END);
if (skb->ip_summed == CHECKSUM_PARTIAL)
flagsize |= BD_FLG_TCP_UDP_SUM;
if (vlan_tx_tag_present(skb)) {
flagsize |= BD_FLG_VLAN_TAG;
vlan_tag = vlan_tx_tag_get(skb);
}
desc = ap->tx_ring + idx;
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
/* Look at ace_tx_int for explanations. */
if (tx_ring_full(ap, ap->tx_ret_csm, idx))
flagsize |= BD_FLG_COAL_NOW;
ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
} else {
dma_addr_t mapping;
u32 vlan_tag = 0;
int i, len = 0;
mapping = ace_map_tx_skb(ap, skb, NULL, idx);
flagsize = (skb_headlen(skb) << 16);
if (skb->ip_summed == CHECKSUM_PARTIAL)
flagsize |= BD_FLG_TCP_UDP_SUM;
if (vlan_tx_tag_present(skb)) {
flagsize |= BD_FLG_VLAN_TAG;
vlan_tag = vlan_tx_tag_get(skb);
}
ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
struct tx_ring_info *info;
len += frag->size;
info = ap->skb->tx_skbuff + idx;
desc = ap->tx_ring + idx;
mapping = pci_map_page(ap->pdev, frag->page,
frag->page_offset, frag->size,
PCI_DMA_TODEVICE);
flagsize = (frag->size << 16);
if (skb->ip_summed == CHECKSUM_PARTIAL)
flagsize |= BD_FLG_TCP_UDP_SUM;
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
if (i == skb_shinfo(skb)->nr_frags - 1) {
flagsize |= BD_FLG_END;
if (tx_ring_full(ap, ap->tx_ret_csm, idx))
flagsize |= BD_FLG_COAL_NOW;
/*
* Only the last fragment frees
* the skb!
*/
info->skb = skb;
} else {
info->skb = NULL;
}
dma_unmap_addr_set(info, mapping, mapping);
dma_unmap_len_set(info, maplen, frag->size);
ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
}
}
wmb();
ap->tx_prd = idx;
ace_set_txprd(regs, ap, idx);
if (flagsize & BD_FLG_COAL_NOW) {
netif_stop_queue(dev);
/*
* A TX-descriptor producer (an IRQ) might have gotten
* between, making the ring free again. Since xmit is
* serialized, this is the only situation we have to
* re-test.
*/
if (!tx_ring_full(ap, ap->tx_ret_csm, idx))
netif_wake_queue(dev);
}
return NETDEV_TX_OK;
overflow:
/*
* This race condition is unavoidable with lock-free drivers.
* We wake up the queue _before_ tx_prd is advanced, so that we can
* enter hard_start_xmit too early, while tx ring still looks closed.
* This happens ~1-4 times per 100000 packets, so it is cheap enough
* to loop while syncing with the other CPU. Probably we need an
* additional wmb() in ace_tx_int as well.
*
* Note that this race is relieved by reserving one more entry
* in tx ring than it is necessary (see original non-SG driver).
* However, with SG we need to reserve 2*MAX_SKB_FRAGS+1, which
* is already overkill.
*
* The alternative is to return 1 without throttling the queue. In
* that case the loop just becomes longer, with no added benefit.
*/
if (time_before(jiffies, maxjiff)) {
barrier();
cpu_relax();
goto restart;
}
/* The ring is stuck full. */
printk(KERN_WARNING "%s: Transmit ring stuck full\n", dev->name);
return NETDEV_TX_BUSY;
}
static int ace_change_mtu(struct net_device *dev, int new_mtu)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
if (new_mtu > ACE_JUMBO_MTU)
return -EINVAL;
writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
dev->mtu = new_mtu;
if (new_mtu > ACE_STD_MTU) {
if (!(ap->jumbo)) {
printk(KERN_INFO "%s: Enabling Jumbo frame "
"support\n", dev->name);
ap->jumbo = 1;
if (!test_and_set_bit(0, &ap->jumbo_refill_busy))
ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
ace_set_rxtx_parms(dev, 1);
}
} else {
while (test_and_set_bit(0, &ap->jumbo_refill_busy));
ace_sync_irq(dev->irq);
ace_set_rxtx_parms(dev, 0);
if (ap->jumbo) {
struct cmd cmd;
cmd.evt = C_RESET_JUMBO_RNG;
cmd.code = 0;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
}
}
return 0;
}
static int ace_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
u32 link;
memset(ecmd, 0, sizeof(struct ethtool_cmd));
ecmd->supported =
(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg | SUPPORTED_FIBRE);
ecmd->port = PORT_FIBRE;
ecmd->transceiver = XCVR_INTERNAL;
link = readl(&regs->GigLnkState);
if (link & LNK_1000MB)
ethtool_cmd_speed_set(ecmd, SPEED_1000);
else {
link = readl(&regs->FastLnkState);
if (link & LNK_100MB)
ethtool_cmd_speed_set(ecmd, SPEED_100);
else if (link & LNK_10MB)
ethtool_cmd_speed_set(ecmd, SPEED_10);
else
ethtool_cmd_speed_set(ecmd, 0);
}
if (link & LNK_FULL_DUPLEX)
ecmd->duplex = DUPLEX_FULL;
else
ecmd->duplex = DUPLEX_HALF;
if (link & LNK_NEGOTIATE)
ecmd->autoneg = AUTONEG_ENABLE;
else
ecmd->autoneg = AUTONEG_DISABLE;
#if 0
/*
* Current struct ethtool_cmd is insufficient
*/
ecmd->trace = readl(&regs->TuneTrace);
ecmd->txcoal = readl(&regs->TuneTxCoalTicks);
ecmd->rxcoal = readl(&regs->TuneRxCoalTicks);
#endif
ecmd->maxtxpkt = readl(&regs->TuneMaxTxDesc);
ecmd->maxrxpkt = readl(&regs->TuneMaxRxDesc);
return 0;
}
static int ace_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
u32 link, speed;
link = readl(&regs->GigLnkState);
if (link & LNK_1000MB)
speed = SPEED_1000;
else {
link = readl(&regs->FastLnkState);
if (link & LNK_100MB)
speed = SPEED_100;
else if (link & LNK_10MB)
speed = SPEED_10;
else
speed = SPEED_100;
}
link = LNK_ENABLE | LNK_1000MB | LNK_100MB | LNK_10MB |
LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL;
if (!ACE_IS_TIGON_I(ap))
link |= LNK_TX_FLOW_CTL_Y;
if (ecmd->autoneg == AUTONEG_ENABLE)
link |= LNK_NEGOTIATE;
if (ethtool_cmd_speed(ecmd) != speed) {
link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB);
switch (ethtool_cmd_speed(ecmd)) {
case SPEED_1000:
link |= LNK_1000MB;
break;
case SPEED_100:
link |= LNK_100MB;
break;
case SPEED_10:
link |= LNK_10MB;
break;
}
}
if (ecmd->duplex == DUPLEX_FULL)
link |= LNK_FULL_DUPLEX;
if (link != ap->link) {
struct cmd cmd;
printk(KERN_INFO "%s: Renegotiating link state\n",
dev->name);
ap->link = link;
writel(link, &regs->TuneLink);
if (!ACE_IS_TIGON_I(ap))
writel(link, &regs->TuneFastLink);
wmb();
cmd.evt = C_LNK_NEGOTIATION;
cmd.code = 0;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
}
return 0;
}
static void ace_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct ace_private *ap = netdev_priv(dev);
strlcpy(info->driver, "acenic", sizeof(info->driver));
snprintf(info->version, sizeof(info->version), "%i.%i.%i",
ap->firmware_major, ap->firmware_minor,
ap->firmware_fix);
if (ap->pdev)
strlcpy(info->bus_info, pci_name(ap->pdev),
sizeof(info->bus_info));
}
/*
* Set the hardware MAC address.
*/
static int ace_set_mac_addr(struct net_device *dev, void *p)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
struct sockaddr *addr = p;
u8 *da;
struct cmd cmd;
if (netif_running(dev))
return -EBUSY;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
da = (u8 *)dev->dev_addr;
writel(da[0] << 8 | da[1], &regs->MacAddrHi);
writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5],
&regs->MacAddrLo);
cmd.evt = C_SET_MAC_ADDR;
cmd.code = 0;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
return 0;
}
static void ace_set_multicast_list(struct net_device *dev)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
struct cmd cmd;
if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) {
cmd.evt = C_SET_MULTICAST_MODE;
cmd.code = C_C_MCAST_ENABLE;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
ap->mcast_all = 1;
} else if (ap->mcast_all) {
cmd.evt = C_SET_MULTICAST_MODE;
cmd.code = C_C_MCAST_DISABLE;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
ap->mcast_all = 0;
}
if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) {
cmd.evt = C_SET_PROMISC_MODE;
cmd.code = C_C_PROMISC_ENABLE;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
ap->promisc = 1;
} else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) {
cmd.evt = C_SET_PROMISC_MODE;
cmd.code = C_C_PROMISC_DISABLE;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
ap->promisc = 0;
}
/*
* For the time being multicast relies on the upper layers
* filtering it properly. The Firmware does not allow one to
* set the entire multicast list at a time and keeping track of
* it here is going to be messy.
*/
if (!netdev_mc_empty(dev) && !ap->mcast_all) {
cmd.evt = C_SET_MULTICAST_MODE;
cmd.code = C_C_MCAST_ENABLE;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
} else if (!ap->mcast_all) {
cmd.evt = C_SET_MULTICAST_MODE;
cmd.code = C_C_MCAST_DISABLE;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
}
}
static struct net_device_stats *ace_get_stats(struct net_device *dev)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_mac_stats __iomem *mac_stats =
(struct ace_mac_stats __iomem *)ap->regs->Stats;
dev->stats.rx_missed_errors = readl(&mac_stats->drop_space);
dev->stats.multicast = readl(&mac_stats->kept_mc);
dev->stats.collisions = readl(&mac_stats->coll);
return &dev->stats;
}
static void __devinit ace_copy(struct ace_regs __iomem *regs, const __be32 *src,
u32 dest, int size)
{
void __iomem *tdest;
short tsize, i;
if (size <= 0)
return;
while (size > 0) {
tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
min_t(u32, size, ACE_WINDOW_SIZE));
tdest = (void __iomem *) &regs->Window +
(dest & (ACE_WINDOW_SIZE - 1));
writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
for (i = 0; i < (tsize / 4); i++) {
/* Firmware is big-endian */
writel(be32_to_cpup(src), tdest);
src++;
tdest += 4;
dest += 4;
size -= 4;
}
}
}
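/*
 * Added note: the NIC's SRAM is reached through a sliding window of
 * ACE_WINDOW_SIZE bytes. WinBase selects which aligned chunk of SRAM
 * the window exposes, and the low bits of the target address index
 * into regs->Window. The tsize expression above is the number of
 * bytes left before the window boundary. For illustration only,
 * assuming a 0x800-byte window: a copy to dest = 0x27f0 first
 * transfers 0x10 bytes to finish the window based at 0x2000, then
 * re-bases the window at 0x2800 for the remainder.
 */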
static void __devinit ace_clear(struct ace_regs __iomem *regs, u32 dest, int size)
{
void __iomem *tdest;
short tsize = 0, i;
if (size <= 0)
return;
while (size > 0) {
tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
min_t(u32, size, ACE_WINDOW_SIZE));
tdest = (void __iomem *) &regs->Window +
(dest & (ACE_WINDOW_SIZE - 1));
writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
for (i = 0; i < (tsize / 4); i++) {
writel(0, tdest + i*4);
}
dest += tsize;
size -= tsize;
}
}
/*
* Download the firmware into the SRAM on the NIC
*
* This operation requires the NIC to be halted and is performed with
* interrupts disabled and with the spinlock held.
*/
static int __devinit ace_load_firmware(struct net_device *dev)
{
const struct firmware *fw;
const char *fw_name = "acenic/tg2.bin";
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
const __be32 *fw_data;
u32 load_addr;
int ret;
if (!(readl(&regs->CpuCtrl) & CPU_HALTED)) {
printk(KERN_ERR "%s: trying to download firmware while the "
"CPU is running!\n", ap->name);
return -EFAULT;
}
if (ACE_IS_TIGON_I(ap))
fw_name = "acenic/tg1.bin";
ret = request_firmware(&fw, fw_name, &ap->pdev->dev);
if (ret) {
printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
ap->name, fw_name);
return ret;
}
fw_data = (void *)fw->data;
/* Firmware blob starts with version numbers, followed by
load and start address. Remainder is the blob to be loaded
contiguously from load address. We don't bother to represent
the BSS/SBSS sections any more, since we were clearing the
whole thing anyway. */
ap->firmware_major = fw->data[0];
ap->firmware_minor = fw->data[1];
ap->firmware_fix = fw->data[2];
ap->firmware_start = be32_to_cpu(fw_data[1]);
if (ap->firmware_start < 0x4000 || ap->firmware_start >= 0x80000) {
printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
ap->name, ap->firmware_start, fw_name);
ret = -EINVAL;
goto out;
}
load_addr = be32_to_cpu(fw_data[2]);
if (load_addr < 0x4000 || load_addr >= 0x80000) {
printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
ap->name, load_addr, fw_name);
ret = -EINVAL;
goto out;
}
/*
* Do not try to clear more than 512KiB or we end up seeing
* funny things on NICs with only 512KiB SRAM
*/
ace_clear(regs, 0x2000, 0x80000-0x2000);
ace_copy(regs, &fw_data[3], load_addr, fw->size-12);
out:
release_firmware(fw);
return ret;
}
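/*
 * Added summary of the firmware blob layout, as this function actually
 * reads it (offsets inferred from the code above, not from a spec):
 *
 *   fw->data[0..2]  firmware version bytes (major, minor, fix)
 *   fw_data[1]      big-endian start address
 *   fw_data[2]      big-endian load address
 *   fw_data[3]...   image, copied contiguously to the load address
 *
 * which is why the copy passes fw->size - 12: the three 32-bit header
 * words are not part of the image itself.
 */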
/*
* The eeprom on the AceNIC is an Atmel i2c EEPROM.
*
* Accessing the EEPROM is `interesting' to say the least - don't read
* this code right after dinner.
*
* This is all about black magic and bit-banging the device .... I
* wonder in what hospital they have put the guy who designed the i2c
* specs.
*
* Oh yes, this is only the beginning!
*
* Thanks to Stevarino Webinski for helping track down the bugs in the
* i2c readout code by beta testing all my hacks.
*/
static void __devinit eeprom_start(struct ace_regs __iomem *regs)
{
u32 local;
readl(&regs->LocalCtrl);
udelay(ACE_SHORT_DELAY);
local = readl(&regs->LocalCtrl);
local |= EEPROM_DATA_OUT | EEPROM_WRITE_ENABLE;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
local |= EEPROM_CLK_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
local &= ~EEPROM_DATA_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
local &= ~EEPROM_CLK_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
}
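/*
 * Added note: eeprom_start() above bit-bangs an I2C START condition.
 * On I2C, START is defined as the data line falling while the clock
 * is high, which the names suggest map to EEPROM_DATA_OUT (SDA) and
 * EEPROM_CLK_OUT (SCL): raise data, raise clock, drop data, drop
 * clock, exactly the write sequence above. Each readl() after a
 * writel() forces the GPIO change to post before the next delay.
 */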
static void __devinit eeprom_prep(struct ace_regs __iomem *regs, u8 magic)
{
short i;
u32 local;
udelay(ACE_SHORT_DELAY);
local = readl(&regs->LocalCtrl);
local &= ~EEPROM_DATA_OUT;
local |= EEPROM_WRITE_ENABLE;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
for (i = 0; i < 8; i++, magic <<= 1) {
udelay(ACE_SHORT_DELAY);
if (magic & 0x80)
local |= EEPROM_DATA_OUT;
else
local &= ~EEPROM_DATA_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
local |= EEPROM_CLK_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
local &= ~(EEPROM_CLK_OUT | EEPROM_DATA_OUT);
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
}
}
static int __devinit eeprom_check_ack(struct ace_regs __iomem *regs)
{
int state;
u32 local;
local = readl(&regs->LocalCtrl);
local &= ~EEPROM_WRITE_ENABLE;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_LONG_DELAY);
local |= EEPROM_CLK_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
/* sample data in middle of high clk */
state = (readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0;
udelay(ACE_SHORT_DELAY);
mb();
writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
return state;
}
static void __devinit eeprom_stop(struct ace_regs __iomem *regs)
{
u32 local;
udelay(ACE_SHORT_DELAY);
local = readl(&regs->LocalCtrl);
local |= EEPROM_WRITE_ENABLE;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
local &= ~EEPROM_DATA_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
local |= EEPROM_CLK_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
local |= EEPROM_DATA_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_LONG_DELAY);
local &= ~EEPROM_CLK_OUT;
writel(local, &regs->LocalCtrl);
mb();
}
/*
* Read a whole byte from the EEPROM.
*/
static int __devinit read_eeprom_byte(struct net_device *dev,
unsigned long offset)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
unsigned long flags;
u32 local;
int result = 0;
short i;
/*
* Don't take interrupts on this CPU while bit banging
* the %#%#@$ I2C device
*/
local_irq_save(flags);
eeprom_start(regs);
eeprom_prep(regs, EEPROM_WRITE_SELECT);
if (eeprom_check_ack(regs)) {
local_irq_restore(flags);
printk(KERN_ERR "%s: Unable to sync eeprom\n", ap->name);
result = -EIO;
goto eeprom_read_error;
}
eeprom_prep(regs, (offset >> 8) & 0xff);
if (eeprom_check_ack(regs)) {
local_irq_restore(flags);
printk(KERN_ERR "%s: Unable to set address byte 0\n",
ap->name);
result = -EIO;
goto eeprom_read_error;
}
eeprom_prep(regs, offset & 0xff);
if (eeprom_check_ack(regs)) {
local_irq_restore(flags);
printk(KERN_ERR "%s: Unable to set address byte 1\n",
ap->name);
result = -EIO;
goto eeprom_read_error;
}
eeprom_start(regs);
eeprom_prep(regs, EEPROM_READ_SELECT);
if (eeprom_check_ack(regs)) {
local_irq_restore(flags);
printk(KERN_ERR "%s: Unable to set READ_SELECT\n",
ap->name);
result = -EIO;
goto eeprom_read_error;
}
for (i = 0; i < 8; i++) {
local = readl(&regs->LocalCtrl);
local &= ~EEPROM_WRITE_ENABLE;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
udelay(ACE_LONG_DELAY);
mb();
local |= EEPROM_CLK_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
/* sample data mid high clk */
result = (result << 1) |
((readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0);
udelay(ACE_SHORT_DELAY);
mb();
local = readl(&regs->LocalCtrl);
local &= ~EEPROM_CLK_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
udelay(ACE_SHORT_DELAY);
mb();
if (i == 7) {
local |= EEPROM_WRITE_ENABLE;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
}
}
local |= EEPROM_DATA_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
writel(readl(&regs->LocalCtrl) | EEPROM_CLK_OUT, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
udelay(ACE_LONG_DELAY);
writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
eeprom_stop(regs);
local_irq_restore(flags);
out:
return result;
eeprom_read_error:
printk(KERN_ERR "%s: Unable to read eeprom byte 0x%02lx\n",
ap->name, offset);
goto out;
}
| gpl-2.0 |
Californication/lge-kernel-msm7x27-SDSL | arch/powerpc/kvm/booke_emulate.c | 898 | 6850 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2008
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
*/
#include <linux/kvm_host.h>
#include <asm/disassemble.h>
#include "booke.h"
#define OP_19_XOP_RFI 50
#define OP_31_XOP_MFMSR 83
#define OP_31_XOP_WRTEE 131
#define OP_31_XOP_MTMSR 146
#define OP_31_XOP_WRTEEI 163
static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
{
vcpu->arch.pc = vcpu->arch.srr0;
kvmppc_set_msr(vcpu, vcpu->arch.srr1);
}
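/*
 * Added note: rfi (return from interrupt) on Book E restores the
 * context saved at exception entry: the PC is reloaded from SRR0 and
 * the MSR from SRR1, which is exactly the two lines above. Going
 * through kvmppc_set_msr() rather than a raw assignment lets the MSR
 * update's side effects (e.g. interrupt delivery becoming possible
 * once MSR[EE] is set again) take place.
 */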
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
int rs;
int rt;
switch (get_op(inst)) {
case 19:
switch (get_xop(inst)) {
case OP_19_XOP_RFI:
kvmppc_emul_rfi(vcpu);
kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS);
*advance = 0;
break;
default:
emulated = EMULATE_FAIL;
break;
}
break;
case 31:
switch (get_xop(inst)) {
case OP_31_XOP_MFMSR:
rt = get_rt(inst);
kvmppc_set_gpr(vcpu, rt, vcpu->arch.msr);
kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
break;
case OP_31_XOP_MTMSR:
rs = get_rs(inst);
kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
break;
case OP_31_XOP_WRTEE:
rs = get_rs(inst);
vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
| (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
break;
case OP_31_XOP_WRTEEI:
vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
| (inst & MSR_EE);
kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
break;
default:
emulated = EMULATE_FAIL;
}
break;
default:
emulated = EMULATE_FAIL;
}
return emulated;
}
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
int emulated = EMULATE_DONE;
ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_DEAR:
vcpu->arch.dear = spr_val; break;
case SPRN_ESR:
vcpu->arch.esr = spr_val; break;
case SPRN_DBCR0:
vcpu->arch.dbcr0 = spr_val; break;
case SPRN_DBCR1:
vcpu->arch.dbcr1 = spr_val; break;
case SPRN_DBSR:
vcpu->arch.dbsr &= ~spr_val; break;
case SPRN_TSR:
vcpu->arch.tsr &= ~spr_val; break;
case SPRN_TCR:
vcpu->arch.tcr = spr_val;
kvmppc_emulate_dec(vcpu);
break;
/* Note: SPRG4-7 are user-readable. These values are
* loaded into the real SPRGs when resuming the
* guest. */
case SPRN_SPRG4:
vcpu->arch.sprg4 = spr_val; break;
case SPRN_SPRG5:
vcpu->arch.sprg5 = spr_val; break;
case SPRN_SPRG6:
vcpu->arch.sprg6 = spr_val; break;
case SPRN_SPRG7:
vcpu->arch.sprg7 = spr_val; break;
case SPRN_IVPR:
vcpu->arch.ivpr = spr_val;
break;
case SPRN_IVOR0:
vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val;
break;
case SPRN_IVOR1:
vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val;
break;
case SPRN_IVOR2:
vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val;
break;
case SPRN_IVOR3:
vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val;
break;
case SPRN_IVOR4:
vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = spr_val;
break;
case SPRN_IVOR5:
vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = spr_val;
break;
case SPRN_IVOR6:
vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = spr_val;
break;
case SPRN_IVOR7:
vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val;
break;
case SPRN_IVOR8:
vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val;
break;
case SPRN_IVOR9:
vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val;
break;
case SPRN_IVOR10:
vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = spr_val;
break;
case SPRN_IVOR11:
vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = spr_val;
break;
case SPRN_IVOR12:
vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = spr_val;
break;
case SPRN_IVOR13:
vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = spr_val;
break;
case SPRN_IVOR14:
vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = spr_val;
break;
case SPRN_IVOR15:
vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val;
break;
default:
emulated = EMULATE_FAIL;
}
return emulated;
}
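/*
 * Added note on the DBSR/TSR cases above: these status registers are
 * "write one to clear" (W1C) in hardware, which is why the emulation
 * uses "&= ~spr_val" rather than assignment. A guest mtspr with only
 * the decrementer status bit set, for example, clears just that bit
 * and leaves the remaining status bits intact.
 */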
int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
int emulated = EMULATE_DONE;
switch (sprn) {
case SPRN_IVPR:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break;
case SPRN_DEAR:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); break;
case SPRN_ESR:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break;
case SPRN_DBCR0:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
case SPRN_DBCR1:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break;
case SPRN_DBSR:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break;
case SPRN_IVOR0:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
break;
case SPRN_IVOR1:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]);
break;
case SPRN_IVOR2:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
break;
case SPRN_IVOR3:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]);
break;
case SPRN_IVOR4:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]);
break;
case SPRN_IVOR5:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]);
break;
case SPRN_IVOR6:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]);
break;
case SPRN_IVOR7:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]);
break;
case SPRN_IVOR8:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
break;
case SPRN_IVOR9:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]);
break;
case SPRN_IVOR10:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]);
break;
case SPRN_IVOR11:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]);
break;
case SPRN_IVOR12:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]);
break;
case SPRN_IVOR13:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]);
break;
case SPRN_IVOR14:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]);
break;
case SPRN_IVOR15:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]);
break;
default:
emulated = EMULATE_FAIL;
}
return emulated;
}
| gpl-2.0 |
djvoleur/V_925R4_BOF7 | drivers/mfd/sm501.c | 1922 | 41719 | /* linux/drivers/mfd/sm501.c
*
* Copyright (C) 2006 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* Vincent Sanders <vince@simtec.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* SM501 MFD driver
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/i2c-gpio.h>
#include <linux/slab.h>
#include <linux/sm501.h>
#include <linux/sm501-regs.h>
#include <linux/serial_8250.h>
#include <linux/io.h>
struct sm501_device {
struct list_head list;
struct platform_device pdev;
};
struct sm501_gpio;
#ifdef CONFIG_MFD_SM501_GPIO
#include <linux/gpio.h>
struct sm501_gpio_chip {
struct gpio_chip gpio;
struct sm501_gpio *ourgpio; /* to get back to parent. */
void __iomem *regbase;
void __iomem *control; /* address of control reg. */
};
struct sm501_gpio {
struct sm501_gpio_chip low;
struct sm501_gpio_chip high;
spinlock_t lock;
unsigned int registered : 1;
void __iomem *regs;
struct resource *regs_res;
};
#else
struct sm501_gpio {
/* no gpio support, empty definition for sm501_devdata. */
};
#endif
struct sm501_devdata {
spinlock_t reg_lock;
struct mutex clock_lock;
struct list_head devices;
struct sm501_gpio gpio;
struct device *dev;
struct resource *io_res;
struct resource *mem_res;
struct resource *regs_claim;
struct sm501_platdata *platdata;
unsigned int in_suspend;
unsigned long pm_misc;
int unit_power[20];
unsigned int pdev_id;
unsigned int irq;
void __iomem *regs;
unsigned int rev;
};
#define MHZ (1000 * 1000)
#ifdef DEBUG
static const unsigned int div_tab[] = {
[0] = 1,
[1] = 2,
[2] = 4,
[3] = 8,
[4] = 16,
[5] = 32,
[6] = 64,
[7] = 128,
[8] = 3,
[9] = 6,
[10] = 12,
[11] = 24,
[12] = 48,
[13] = 96,
[14] = 192,
[15] = 384,
[16] = 5,
[17] = 10,
[18] = 20,
[19] = 40,
[20] = 80,
[21] = 160,
[22] = 320,
[23] = 640,	/* 5 << 7, completing the 5, 10, 20, ... row */
};
static unsigned long decode_div(unsigned long pll2, unsigned long val,
unsigned int lshft, unsigned int selbit,
unsigned long mask)
{
if (val & selbit)
pll2 = 288 * MHZ;
return pll2 / div_tab[(val >> lshft) & mask];
}
#define fmt_freq(x) ((x) / MHZ), ((x) % MHZ), (x)
/* sm501_dump_clk
*
* Print out the current clock configuration for the device
*/
static void sm501_dump_clk(struct sm501_devdata *sm)
{
unsigned long misct = smc501_readl(sm->regs + SM501_MISC_TIMING);
unsigned long pm0 = smc501_readl(sm->regs + SM501_POWER_MODE_0_CLOCK);
unsigned long pm1 = smc501_readl(sm->regs + SM501_POWER_MODE_1_CLOCK);
unsigned long pmc = smc501_readl(sm->regs + SM501_POWER_MODE_CONTROL);
unsigned long sdclk0, sdclk1;
unsigned long pll2 = 0;
switch (misct & 0x30) {
case 0x00:
pll2 = 336 * MHZ;
break;
case 0x10:
pll2 = 288 * MHZ;
break;
case 0x20:
pll2 = 240 * MHZ;
break;
case 0x30:
pll2 = 192 * MHZ;
break;
}
sdclk0 = (misct & (1<<12)) ? pll2 : 288 * MHZ;
sdclk0 /= div_tab[((misct >> 8) & 0xf)];
sdclk1 = (misct & (1<<20)) ? pll2 : 288 * MHZ;
sdclk1 /= div_tab[((misct >> 16) & 0xf)];
dev_dbg(sm->dev, "MISCT=%08lx, PM0=%08lx, PM1=%08lx\n",
misct, pm0, pm1);
dev_dbg(sm->dev, "PLL2 = %ld.%ld MHz (%ld), SDCLK0=%08lx, SDCLK1=%08lx\n",
fmt_freq(pll2), sdclk0, sdclk1);
dev_dbg(sm->dev, "SDRAM: PM0=%ld, PM1=%ld\n", sdclk0, sdclk1);
dev_dbg(sm->dev, "PM0[%c]: "
"P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), "
"M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n",
(pmc & 3) == 0 ? '*' : '-',
fmt_freq(decode_div(pll2, pm0, 24, 1<<29, 31)),
fmt_freq(decode_div(pll2, pm0, 16, 1<<20, 15)),
fmt_freq(decode_div(pll2, pm0, 8, 1<<12, 15)),
fmt_freq(decode_div(pll2, pm0, 0, 1<<4, 15)));
dev_dbg(sm->dev, "PM1[%c]: "
"P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), "
"M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n",
(pmc & 3) == 1 ? '*' : '-',
fmt_freq(decode_div(pll2, pm1, 24, 1<<29, 31)),
fmt_freq(decode_div(pll2, pm1, 16, 1<<20, 15)),
fmt_freq(decode_div(pll2, pm1, 8, 1<<12, 15)),
fmt_freq(decode_div(pll2, pm1, 0, 1<<4, 15)));
}
static void sm501_dump_regs(struct sm501_devdata *sm)
{
void __iomem *regs = sm->regs;
dev_info(sm->dev, "System Control %08x\n",
smc501_readl(regs + SM501_SYSTEM_CONTROL));
dev_info(sm->dev, "Misc Control %08x\n",
smc501_readl(regs + SM501_MISC_CONTROL));
dev_info(sm->dev, "GPIO Control Low %08x\n",
smc501_readl(regs + SM501_GPIO31_0_CONTROL));
dev_info(sm->dev, "GPIO Control Hi %08x\n",
smc501_readl(regs + SM501_GPIO63_32_CONTROL));
dev_info(sm->dev, "DRAM Control %08x\n",
smc501_readl(regs + SM501_DRAM_CONTROL));
dev_info(sm->dev, "Arbitration Ctrl %08x\n",
smc501_readl(regs + SM501_ARBTRTN_CONTROL));
dev_info(sm->dev, "Misc Timing %08x\n",
smc501_readl(regs + SM501_MISC_TIMING));
}
static void sm501_dump_gate(struct sm501_devdata *sm)
{
dev_info(sm->dev, "CurrentGate %08x\n",
smc501_readl(sm->regs + SM501_CURRENT_GATE));
dev_info(sm->dev, "CurrentClock %08x\n",
smc501_readl(sm->regs + SM501_CURRENT_CLOCK));
dev_info(sm->dev, "PowerModeControl %08x\n",
smc501_readl(sm->regs + SM501_POWER_MODE_CONTROL));
}
#else
static inline void sm501_dump_gate(struct sm501_devdata *sm) { }
static inline void sm501_dump_regs(struct sm501_devdata *sm) { }
static inline void sm501_dump_clk(struct sm501_devdata *sm) { }
#endif
/* sm501_sync_regs
*
* ensure the previous register write has been posted to the device
* by reading back over the bus (flushes any buffered write).
*/
static void sm501_sync_regs(struct sm501_devdata *sm)
{
smc501_readl(sm->regs);
}
static inline void sm501_mdelay(struct sm501_devdata *sm, unsigned int delay)
{
/* during suspend/resume, we are currently not allowed to sleep,
* so change to using mdelay() instead of msleep() if we
* are in one of these paths */
if (sm->in_suspend)
mdelay(delay);
else
msleep(delay);
}
/* sm501_misc_control
*
* alters the miscellaneous control parameters
*/
int sm501_misc_control(struct device *dev,
unsigned long set, unsigned long clear)
{
struct sm501_devdata *sm = dev_get_drvdata(dev);
unsigned long misc;
unsigned long save;
unsigned long to;
spin_lock_irqsave(&sm->reg_lock, save);
misc = smc501_readl(sm->regs + SM501_MISC_CONTROL);
to = (misc & ~clear) | set;
if (to != misc) {
smc501_writel(to, sm->regs + SM501_MISC_CONTROL);
sm501_sync_regs(sm);
dev_dbg(sm->dev, "MISC_CONTROL %08lx\n", misc);
}
spin_unlock_irqrestore(&sm->reg_lock, save);
return to;
}
EXPORT_SYMBOL_GPL(sm501_misc_control);
/* sm501_modify_reg
*
* Modify a register in the SM501 which may be shared with other
* drivers.
*/
unsigned long sm501_modify_reg(struct device *dev,
unsigned long reg,
unsigned long set,
unsigned long clear)
{
struct sm501_devdata *sm = dev_get_drvdata(dev);
unsigned long data;
unsigned long save;
spin_lock_irqsave(&sm->reg_lock, save);
data = smc501_readl(sm->regs + reg);
data |= set;
data &= ~clear;
smc501_writel(data, sm->regs + reg);
sm501_sync_regs(sm);
spin_unlock_irqrestore(&sm->reg_lock, save);
return data;
}
EXPORT_SYMBOL_GPL(sm501_modify_reg);
/* sm501_unit_power
*
* alters the power active gate to set specific units on or off
*/
int sm501_unit_power(struct device *dev, unsigned int unit, unsigned int to)
{
struct sm501_devdata *sm = dev_get_drvdata(dev);
unsigned long mode;
unsigned long gate;
unsigned long clock;
mutex_lock(&sm->clock_lock);
mode = smc501_readl(sm->regs + SM501_POWER_MODE_CONTROL);
gate = smc501_readl(sm->regs + SM501_CURRENT_GATE);
clock = smc501_readl(sm->regs + SM501_CURRENT_CLOCK);
mode &= 3; /* get current power mode */
if (unit >= ARRAY_SIZE(sm->unit_power)) {
dev_err(dev, "%s: bad unit %d\n", __func__, unit);
goto already;
}
dev_dbg(sm->dev, "%s: unit %d, cur %d, to %d\n", __func__, unit,
sm->unit_power[unit], to);
if (to == 0 && sm->unit_power[unit] == 0) {
dev_err(sm->dev, "unit %d is already shutdown\n", unit);
goto already;
}
sm->unit_power[unit] += to ? 1 : -1;
to = sm->unit_power[unit] ? 1 : 0;
if (to) {
if (gate & (1 << unit))
goto already;
gate |= (1 << unit);
} else {
if (!(gate & (1 << unit)))
goto already;
gate &= ~(1 << unit);
}
switch (mode) {
case 1:
smc501_writel(gate, sm->regs + SM501_POWER_MODE_0_GATE);
smc501_writel(clock, sm->regs + SM501_POWER_MODE_0_CLOCK);
mode = 0;
break;
case 2:
case 0:
smc501_writel(gate, sm->regs + SM501_POWER_MODE_1_GATE);
smc501_writel(clock, sm->regs + SM501_POWER_MODE_1_CLOCK);
mode = 1;
break;
default:
gate = -1;
goto already;
}
smc501_writel(mode, sm->regs + SM501_POWER_MODE_CONTROL);
sm501_sync_regs(sm);
dev_dbg(sm->dev, "gate %08lx, clock %08lx, mode %08lx\n",
gate, clock, mode);
sm501_mdelay(sm, 16);
already:
mutex_unlock(&sm->clock_lock);
return gate;
}
EXPORT_SYMBOL_GPL(sm501_unit_power);
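/*
 * Added note on the power-mode dance above: the SM501 has two power
 * mode register sets (mode 0 and mode 1). To change gates glitch-free,
 * the driver writes the new gate/clock values into the currently
 * *inactive* set and then flips POWER_MODE_CONTROL over to it, which
 * is why a device running in mode 1 is reprogrammed via the MODE_0
 * registers and vice versa, followed by the ~16ms settle delay.
 */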
/* clock value structure. */
struct sm501_clock {
unsigned long mclk;
int divider;
int shift;
unsigned int m, n, k;
};
/* sm501_calc_clock
*
* Calculates the nearest discrete clock frequency that
* can be achieved with the specified input clock.
* the maximum divisor is 3 or 5
*/
static int sm501_calc_clock(unsigned long freq,
struct sm501_clock *clock,
int max_div,
unsigned long mclk,
long *best_diff)
{
int ret = 0;
int divider;
int shift;
long diff;
/* try dividers 1 and 3 for CRT and for panel,
try divider 5 for panel only.*/
for (divider = 1; divider <= max_div; divider += 2) {
/* try all 8 shift values.*/
for (shift = 0; shift < 8; shift++) {
/* Calculate difference to requested clock */
diff = DIV_ROUND_CLOSEST(mclk, divider << shift) - freq;
if (diff < 0)
diff = -diff;
/* If it is less than the current, use it */
if (diff < *best_diff) {
*best_diff = diff;
clock->mclk = mclk;
clock->divider = divider;
clock->shift = shift;
ret = 1;
}
}
}
return ret;
}
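/*
 * Added worked example (values for illustration only): asking for
 * freq = 25 MHz with mclk = 288 MHz and max_div = 5 scans
 * 288000000 / (divider << shift) over divider in {1, 3, 5} and shift
 * in 0..7. The closest hit is divider = 3, shift = 2:
 * 288 / (3 << 2) = 24 MHz (diff 1 MHz), which beats e.g.
 * 288 / (5 << 1) = 28.8 MHz (diff 3.8 MHz) and
 * 288 / (1 << 4) = 18 MHz (diff 7 MHz).
 */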
/* sm501_calc_pll
*
* Calculates the nearest discrete clock frequency that can be
* achieved using the programmable PLL.
* the maximum divisor is 3 or 5
*/
static unsigned long sm501_calc_pll(unsigned long freq,
struct sm501_clock *clock,
int max_div)
{
unsigned long mclk;
unsigned int m, n, k;
long best_diff = 999999999;
/*
* The SM502 datasheet doesn't specify the min/max values for M and N.
* N = 1 at least doesn't work in practice.
*/
for (m = 2; m <= 255; m++) {
for (n = 2; n <= 127; n++) {
for (k = 0; k <= 1; k++) {
mclk = (24000000UL * m / n) >> k;
if (sm501_calc_clock(freq, clock, max_div,
mclk, &best_diff)) {
clock->m = m;
clock->n = n;
clock->k = k;
}
}
}
}
/* Return best clock. */
return clock->mclk / (clock->divider << clock->shift);
}
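/*
 * Added note: the exhaustive search above evaluates the SM502 PLL
 * equation mclk = (24 MHz * M / N) >> K for M in 2..255, N in 2..127
 * and K in {0, 1}. For instance M = 96, N = 8, K = 0 reproduces the
 * fixed 288 MHz clock (24 * 96 / 8), and M = 112, N = 8, K = 0 gives
 * 336 MHz, so the programmable PLL covers everything the fixed PLLs
 * can and more.
 */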
/* sm501_select_clock
*
* Calculates the nearest discrete clock frequency that can be
* achieved using the 288MHz and 336MHz PLLs.
* the maximum divisor is 3 or 5
*/
static unsigned long sm501_select_clock(unsigned long freq,
struct sm501_clock *clock,
int max_div)
{
unsigned long mclk;
long best_diff = 999999999;
/* Try 288MHz and 336MHz clocks. */
for (mclk = 288000000; mclk <= 336000000; mclk += 48000000) {
sm501_calc_clock(freq, clock, max_div, mclk, &best_diff);
}
/* Return best clock. */
return clock->mclk / (clock->divider << clock->shift);
}
/* sm501_set_clock
*
* set one of the four clock sources to the closest available frequency to
* the one specified
*/
unsigned long sm501_set_clock(struct device *dev,
int clksrc,
unsigned long req_freq)
{
struct sm501_devdata *sm = dev_get_drvdata(dev);
unsigned long mode = smc501_readl(sm->regs + SM501_POWER_MODE_CONTROL);
unsigned long gate = smc501_readl(sm->regs + SM501_CURRENT_GATE);
unsigned long clock = smc501_readl(sm->regs + SM501_CURRENT_CLOCK);
unsigned char reg;
unsigned int pll_reg = 0;
unsigned long sm501_freq; /* the actual frequency achieved */
struct sm501_clock to;
/* find achivable discrete frequency and setup register value
* accordingly, V2XCLK, MCLK and M1XCLK are the same P2XCLK
* has an extra bit for the divider */
switch (clksrc) {
case SM501_CLOCK_P2XCLK:
/* This clock is divided in half so to achieve the
* requested frequency the value must be multiplied by
* 2. This clock also has an additional pre divisor */
if (sm->rev >= 0xC0) {
/* SM502 -> use the programmable PLL */
sm501_freq = (sm501_calc_pll(2 * req_freq,
&to, 5) / 2);
reg = to.shift & 0x07; /* bottom 3 bits are shift */
if (to.divider == 3)
reg |= 0x08; /* /3 divider required */
else if (to.divider == 5)
reg |= 0x10; /* /5 divider required */
reg |= 0x40; /* select the programmable PLL */
pll_reg = 0x20000 | (to.k << 15) | (to.n << 8) | to.m;
} else {
sm501_freq = (sm501_select_clock(2 * req_freq,
&to, 5) / 2);
reg = to.shift & 0x07; /* bottom 3 bits are shift */
if (to.divider == 3)
reg |= 0x08; /* /3 divider required */
else if (to.divider == 5)
reg |= 0x10; /* /5 divider required */
if (to.mclk != 288000000)
reg |= 0x20; /* which mclk pll is source */
}
break;
case SM501_CLOCK_V2XCLK:
/* This clock is divided in half so to achieve the
* requested frequency the value must be multiplied by 2. */
sm501_freq = (sm501_select_clock(2 * req_freq, &to, 3) / 2);
reg = to.shift & 0x07; /* bottom 3 bits are shift */
if (to.divider == 3)
reg |= 0x08; /* /3 divider required */
if (to.mclk != 288000000)
reg |= 0x10; /* which mclk pll is source */
break;
case SM501_CLOCK_MCLK:
case SM501_CLOCK_M1XCLK:
/* These clocks are the same and not further divided */
sm501_freq = sm501_select_clock(req_freq, &to, 3);
reg = to.shift & 0x07; /* bottom 3 bits are shift */
if (to.divider == 3)
reg |= 0x08; /* /3 divider required */
if (to.mclk != 288000000)
reg |= 0x10; /* which mclk pll is source */
break;
default:
return 0; /* this is bad */
}
mutex_lock(&sm->clock_lock);
mode = smc501_readl(sm->regs + SM501_POWER_MODE_CONTROL);
gate = smc501_readl(sm->regs + SM501_CURRENT_GATE);
clock = smc501_readl(sm->regs + SM501_CURRENT_CLOCK);
clock = clock & ~(0xFF << clksrc);
clock |= reg << clksrc;
mode &= 3; /* find current mode */
switch (mode) {
case 1:
smc501_writel(gate, sm->regs + SM501_POWER_MODE_0_GATE);
smc501_writel(clock, sm->regs + SM501_POWER_MODE_0_CLOCK);
mode = 0;
break;
case 2:
case 0:
smc501_writel(gate, sm->regs + SM501_POWER_MODE_1_GATE);
smc501_writel(clock, sm->regs + SM501_POWER_MODE_1_CLOCK);
mode = 1;
break;
default:
mutex_unlock(&sm->clock_lock);
return -1;
}
smc501_writel(mode, sm->regs + SM501_POWER_MODE_CONTROL);
if (pll_reg)
smc501_writel(pll_reg,
sm->regs + SM501_PROGRAMMABLE_PLL_CONTROL);
sm501_sync_regs(sm);
dev_dbg(sm->dev, "gate %08lx, clock %08lx, mode %08lx\n",
gate, clock, mode);
sm501_mdelay(sm, 16);
mutex_unlock(&sm->clock_lock);
sm501_dump_clk(sm);
return sm501_freq;
}
EXPORT_SYMBOL_GPL(sm501_set_clock);
/* sm501_find_clock
*
* finds the closest available frequency for a given clock
*/
unsigned long sm501_find_clock(struct device *dev,
int clksrc,
unsigned long req_freq)
{
struct sm501_devdata *sm = dev_get_drvdata(dev);
unsigned long sm501_freq; /* the frequency achieveable by the 501 */
struct sm501_clock to;
switch (clksrc) {
case SM501_CLOCK_P2XCLK:
if (sm->rev >= 0xC0) {
/* SM502 -> use the programmable PLL */
sm501_freq = (sm501_calc_pll(2 * req_freq,
&to, 5) / 2);
} else {
sm501_freq = (sm501_select_clock(2 * req_freq,
&to, 5) / 2);
}
break;
case SM501_CLOCK_V2XCLK:
sm501_freq = (sm501_select_clock(2 * req_freq, &to, 3) / 2);
break;
case SM501_CLOCK_MCLK:
case SM501_CLOCK_M1XCLK:
sm501_freq = sm501_select_clock(req_freq, &to, 3);
break;
default:
sm501_freq = 0; /* error */
}
return sm501_freq;
}
EXPORT_SYMBOL_GPL(sm501_find_clock);
static struct sm501_device *to_sm_device(struct platform_device *pdev)
{
return container_of(pdev, struct sm501_device, pdev);
}
/* sm501_device_release
*
* A release function for the platform devices we create to allow us to
* free any items we allocated
*/
static void sm501_device_release(struct device *dev)
{
kfree(to_sm_device(to_platform_device(dev)));
}
/* sm501_create_subdev
*
* Create a skeleton platform device with resources for passing to a
* sub-driver
*/
static struct platform_device *
sm501_create_subdev(struct sm501_devdata *sm, char *name,
unsigned int res_count, unsigned int platform_data_size)
{
struct sm501_device *smdev;
smdev = kzalloc(sizeof(struct sm501_device) +
(sizeof(struct resource) * res_count) +
platform_data_size, GFP_KERNEL);
if (!smdev)
return NULL;
smdev->pdev.dev.release = sm501_device_release;
smdev->pdev.name = name;
smdev->pdev.id = sm->pdev_id;
smdev->pdev.dev.parent = sm->dev;
if (res_count) {
smdev->pdev.resource = (struct resource *)(smdev+1);
smdev->pdev.num_resources = res_count;
}
if (platform_data_size)
smdev->pdev.dev.platform_data = (void *)(smdev+1);
return &smdev->pdev;
}
/* sm501_register_device
*
* Register a platform device created with sm501_create_subdev()
*/
static int sm501_register_device(struct sm501_devdata *sm,
struct platform_device *pdev)
{
struct sm501_device *smdev = to_sm_device(pdev);
int ptr;
int ret;
for (ptr = 0; ptr < pdev->num_resources; ptr++) {
printk(KERN_DEBUG "%s[%d] %pR\n",
pdev->name, ptr, &pdev->resource[ptr]);
}
ret = platform_device_register(pdev);
if (ret >= 0) {
dev_dbg(sm->dev, "registered %s\n", pdev->name);
list_add_tail(&smdev->list, &sm->devices);
} else
dev_err(sm->dev, "error registering %s (%d)\n",
pdev->name, ret);
return ret;
}
/* sm501_create_subio
*
* Fill in an IO resource for a sub device
*/
static void sm501_create_subio(struct sm501_devdata *sm,
struct resource *res,
resource_size_t offs,
resource_size_t size)
{
res->flags = IORESOURCE_MEM;
res->parent = sm->io_res;
res->start = sm->io_res->start + offs;
res->end = res->start + size - 1;
}
/* sm501_create_mem
*
* Fill in an MEM resource for a sub device
*/
static void sm501_create_mem(struct sm501_devdata *sm,
struct resource *res,
resource_size_t *offs,
resource_size_t size)
{
*offs -= size; /* adjust memory size */
res->flags = IORESOURCE_MEM;
res->parent = sm->mem_res;
res->start = sm->mem_res->start + *offs;
res->end = res->start + size - 1;
}
/* sm501_create_irq
*
* Fill in an IRQ resource for a sub device
*/
static void sm501_create_irq(struct sm501_devdata *sm,
struct resource *res)
{
res->flags = IORESOURCE_IRQ;
res->parent = NULL;
res->start = res->end = sm->irq;
}
static int sm501_register_usbhost(struct sm501_devdata *sm,
resource_size_t *mem_avail)
{
struct platform_device *pdev;
pdev = sm501_create_subdev(sm, "sm501-usb", 3, 0);
if (!pdev)
return -ENOMEM;
sm501_create_subio(sm, &pdev->resource[0], 0x40000, 0x20000);
sm501_create_mem(sm, &pdev->resource[1], mem_avail, 256*1024);
sm501_create_irq(sm, &pdev->resource[2]);
return sm501_register_device(sm, pdev);
}
static void sm501_setup_uart_data(struct sm501_devdata *sm,
struct plat_serial8250_port *uart_data,
unsigned int offset)
{
uart_data->membase = sm->regs + offset;
uart_data->mapbase = sm->io_res->start + offset;
uart_data->iotype = UPIO_MEM;
uart_data->irq = sm->irq;
uart_data->flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ;
uart_data->regshift = 2;
uart_data->uartclk = (9600 * 16);
}
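/* divisor example (illustrative, not from the original source): the 8250
* core computes baud = uartclk / (16 * divisor), so uartclk = 9600 * 16
* makes a divisor of 1 give 9600 baud, which is also the maximum usable
* rate on these ports. */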
static int sm501_register_uart(struct sm501_devdata *sm, int devices)
{
struct platform_device *pdev;
struct plat_serial8250_port *uart_data;
pdev = sm501_create_subdev(sm, "serial8250", 0,
sizeof(struct plat_serial8250_port) * 3);
if (!pdev)
return -ENOMEM;
uart_data = pdev->dev.platform_data;
if (devices & SM501_USE_UART0) {
sm501_setup_uart_data(sm, uart_data++, 0x30000);
sm501_unit_power(sm->dev, SM501_GATE_UART0, 1);
sm501_modify_reg(sm->dev, SM501_IRQ_MASK, 1 << 12, 0);
sm501_modify_reg(sm->dev, SM501_GPIO63_32_CONTROL, 0x01e0, 0);
}
if (devices & SM501_USE_UART1) {
sm501_setup_uart_data(sm, uart_data++, 0x30020);
sm501_unit_power(sm->dev, SM501_GATE_UART1, 1);
sm501_modify_reg(sm->dev, SM501_IRQ_MASK, 1 << 13, 0);
sm501_modify_reg(sm->dev, SM501_GPIO63_32_CONTROL, 0x1e00, 0);
}
pdev->id = PLAT8250_DEV_SM501;
return sm501_register_device(sm, pdev);
}
static int sm501_register_display(struct sm501_devdata *sm,
resource_size_t *mem_avail)
{
struct platform_device *pdev;
pdev = sm501_create_subdev(sm, "sm501-fb", 4, 0);
if (!pdev)
return -ENOMEM;
sm501_create_subio(sm, &pdev->resource[0], 0x80000, 0x10000);
sm501_create_subio(sm, &pdev->resource[1], 0x100000, 0x50000);
sm501_create_mem(sm, &pdev->resource[2], mem_avail, *mem_avail);
sm501_create_irq(sm, &pdev->resource[3]);
return sm501_register_device(sm, pdev);
}
#ifdef CONFIG_MFD_SM501_GPIO
static inline struct sm501_gpio_chip *to_sm501_gpio(struct gpio_chip *gc)
{
return container_of(gc, struct sm501_gpio_chip, gpio);
}
static inline struct sm501_devdata *sm501_gpio_to_dev(struct sm501_gpio *gpio)
{
return container_of(gpio, struct sm501_devdata, gpio);
}
static int sm501_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct sm501_gpio_chip *smgpio = to_sm501_gpio(chip);
unsigned long result;
result = smc501_readl(smgpio->regbase + SM501_GPIO_DATA_LOW);
result >>= offset;
return result & 1UL;
}
static void sm501_gpio_ensure_gpio(struct sm501_gpio_chip *smchip,
unsigned long bit)
{
unsigned long ctrl;
/* if this pin is not already set as a gpio, switch its mode. */
if (smc501_readl(smchip->control) & bit) {
dev_info(sm501_gpio_to_dev(smchip->ourgpio)->dev,
"changing mode of gpio, bit %08lx\n", bit);
ctrl = smc501_readl(smchip->control);
ctrl &= ~bit;
smc501_writel(ctrl, smchip->control);
sm501_sync_regs(sm501_gpio_to_dev(smchip->ourgpio));
}
}
static void sm501_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct sm501_gpio_chip *smchip = to_sm501_gpio(chip);
struct sm501_gpio *smgpio = smchip->ourgpio;
unsigned long bit = 1 << offset;
void __iomem *regs = smchip->regbase;
unsigned long save;
unsigned long val;
dev_dbg(sm501_gpio_to_dev(smgpio)->dev, "%s(%p,%d)\n",
__func__, chip, offset);
spin_lock_irqsave(&smgpio->lock, save);
val = smc501_readl(regs + SM501_GPIO_DATA_LOW) & ~bit;
if (value)
val |= bit;
smc501_writel(val, regs);
sm501_sync_regs(sm501_gpio_to_dev(smgpio));
sm501_gpio_ensure_gpio(smchip, bit);
spin_unlock_irqrestore(&smgpio->lock, save);
}
static int sm501_gpio_input(struct gpio_chip *chip, unsigned offset)
{
struct sm501_gpio_chip *smchip = to_sm501_gpio(chip);
struct sm501_gpio *smgpio = smchip->ourgpio;
void __iomem *regs = smchip->regbase;
unsigned long bit = 1 << offset;
unsigned long save;
unsigned long ddr;
dev_dbg(sm501_gpio_to_dev(smgpio)->dev, "%s(%p,%d)\n",
__func__, chip, offset);
spin_lock_irqsave(&smgpio->lock, save);
ddr = smc501_readl(regs + SM501_GPIO_DDR_LOW);
smc501_writel(ddr & ~bit, regs + SM501_GPIO_DDR_LOW);
sm501_sync_regs(sm501_gpio_to_dev(smgpio));
sm501_gpio_ensure_gpio(smchip, bit);
spin_unlock_irqrestore(&smgpio->lock, save);
return 0;
}
static int sm501_gpio_output(struct gpio_chip *chip,
unsigned offset, int value)
{
struct sm501_gpio_chip *smchip = to_sm501_gpio(chip);
struct sm501_gpio *smgpio = smchip->ourgpio;
unsigned long bit = 1 << offset;
void __iomem *regs = smchip->regbase;
unsigned long save;
unsigned long val;
unsigned long ddr;
dev_dbg(sm501_gpio_to_dev(smgpio)->dev, "%s(%p,%d,%d)\n",
__func__, chip, offset, value);
spin_lock_irqsave(&smgpio->lock, save);
val = smc501_readl(regs + SM501_GPIO_DATA_LOW);
if (value)
val |= bit;
else
val &= ~bit;
smc501_writel(val, regs);
ddr = smc501_readl(regs + SM501_GPIO_DDR_LOW);
smc501_writel(ddr | bit, regs + SM501_GPIO_DDR_LOW);
sm501_sync_regs(sm501_gpio_to_dev(smgpio));
smc501_writel(val, regs + SM501_GPIO_DATA_LOW);
sm501_sync_regs(sm501_gpio_to_dev(smgpio));
spin_unlock_irqrestore(&smgpio->lock, save);
return 0;
}
static struct gpio_chip gpio_chip_template = {
.ngpio = 32,
.direction_input = sm501_gpio_input,
.direction_output = sm501_gpio_output,
.set = sm501_gpio_set,
.get = sm501_gpio_get,
};
static int sm501_gpio_register_chip(struct sm501_devdata *sm,
struct sm501_gpio *gpio,
struct sm501_gpio_chip *chip)
{
struct sm501_platdata *pdata = sm->platdata;
struct gpio_chip *gchip = &chip->gpio;
int base = pdata->gpio_base;
chip->gpio = gpio_chip_template;
if (chip == &gpio->high) {
if (base > 0)
base += 32;
chip->regbase = gpio->regs + SM501_GPIO_DATA_HIGH;
chip->control = sm->regs + SM501_GPIO63_32_CONTROL;
gchip->label = "SM501-HIGH";
} else {
chip->regbase = gpio->regs + SM501_GPIO_DATA_LOW;
chip->control = sm->regs + SM501_GPIO31_0_CONTROL;
gchip->label = "SM501-LOW";
}
gchip->base = base;
chip->ourgpio = gpio;
return gpiochip_add(gchip);
}
static int sm501_register_gpio(struct sm501_devdata *sm)
{
struct sm501_gpio *gpio = &sm->gpio;
resource_size_t iobase = sm->io_res->start + SM501_GPIO;
int ret;
int tmp;
dev_dbg(sm->dev, "registering gpio block %08llx\n",
(unsigned long long)iobase);
spin_lock_init(&gpio->lock);
gpio->regs_res = request_mem_region(iobase, 0x20, "sm501-gpio");
if (gpio->regs_res == NULL) {
dev_err(sm->dev, "gpio: failed to request region\n");
return -ENXIO;
}
gpio->regs = ioremap(iobase, 0x20);
if (gpio->regs == NULL) {
dev_err(sm->dev, "gpio: failed to remap registers\n");
ret = -ENXIO;
goto err_claimed;
}
/* Register both our chips. */
ret = sm501_gpio_register_chip(sm, gpio, &gpio->low);
if (ret) {
dev_err(sm->dev, "failed to add low chip\n");
goto err_mapped;
}
ret = sm501_gpio_register_chip(sm, gpio, &gpio->high);
if (ret) {
dev_err(sm->dev, "failed to add high chip\n");
goto err_low_chip;
}
gpio->registered = 1;
return 0;
err_low_chip:
tmp = gpiochip_remove(&gpio->low.gpio);
if (tmp) {
dev_err(sm->dev, "cannot remove low chip, cannot tidy up\n");
return ret;
}
err_mapped:
iounmap(gpio->regs);
err_claimed:
release_resource(gpio->regs_res);
kfree(gpio->regs_res);
return ret;
}
static void sm501_gpio_remove(struct sm501_devdata *sm)
{
struct sm501_gpio *gpio = &sm->gpio;
int ret;
if (!sm->gpio.registered)
return;
ret = gpiochip_remove(&gpio->low.gpio);
if (ret)
dev_err(sm->dev, "cannot remove low chip, cannot tidy up\n");
ret = gpiochip_remove(&gpio->high.gpio);
if (ret)
dev_err(sm->dev, "cannot remove high chip, cannot tidy up\n");
iounmap(gpio->regs);
release_resource(gpio->regs_res);
kfree(gpio->regs_res);
}
static inline int sm501_gpio_pin2nr(struct sm501_devdata *sm, unsigned int pin)
{
struct sm501_gpio *gpio = &sm->gpio;
int base = (pin < 32) ? gpio->low.gpio.base : gpio->high.gpio.base;
return (pin % 32) + base;
}
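/* mapping example (illustrative): with pdata->gpio_base = 64, pin 5 maps
* to gpio 64 + 5 = 69 on the low chip, and pin 37 maps to gpio 101 on the
* high chip (37 % 32 = 5, high chip base = 64 + 32 = 96). */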
static inline int sm501_gpio_isregistered(struct sm501_devdata *sm)
{
return sm->gpio.registered;
}
#else
static inline int sm501_register_gpio(struct sm501_devdata *sm)
{
return 0;
}
static inline void sm501_gpio_remove(struct sm501_devdata *sm)
{
}
static inline int sm501_gpio_pin2nr(struct sm501_devdata *sm, unsigned int pin)
{
return -1;
}
static inline int sm501_gpio_isregistered(struct sm501_devdata *sm)
{
return 0;
}
#endif
static int sm501_register_gpio_i2c_instance(struct sm501_devdata *sm,
struct sm501_platdata_gpio_i2c *iic)
{
struct i2c_gpio_platform_data *icd;
struct platform_device *pdev;
pdev = sm501_create_subdev(sm, "i2c-gpio", 0,
sizeof(struct i2c_gpio_platform_data));
if (!pdev)
return -ENOMEM;
icd = pdev->dev.platform_data;
/* We keep the pin_sda and pin_scl fields relative in case the
* same platform data is passed to >1 SM501.
*/
icd->sda_pin = sm501_gpio_pin2nr(sm, iic->pin_sda);
icd->scl_pin = sm501_gpio_pin2nr(sm, iic->pin_scl);
icd->timeout = iic->timeout;
icd->udelay = iic->udelay;
/* note, we can't use either of the pin numbers as the bus number,
* as the i2c-gpio driver uses the platform.id field to generate
* the bus number it registers with the i2c core; the i2c core
* doesn't have enough entries to cope with the pin numbers we
* currently use.
*/
pdev->id = iic->bus_num;
dev_info(sm->dev, "registering i2c-%d: sda=%d (%d), scl=%d (%d)\n",
iic->bus_num,
icd->sda_pin, iic->pin_sda, icd->scl_pin, iic->pin_scl);
return sm501_register_device(sm, pdev);
}
static int sm501_register_gpio_i2c(struct sm501_devdata *sm,
struct sm501_platdata *pdata)
{
struct sm501_platdata_gpio_i2c *iic = pdata->gpio_i2c;
int index;
int ret;
for (index = 0; index < pdata->gpio_i2c_nr; index++, iic++) {
ret = sm501_register_gpio_i2c_instance(sm, iic);
if (ret < 0)
return ret;
}
return 0;
}
/* sm501_dbg_regs
*
* Debug attribute to attach to parent device to show core registers
*/
static ssize_t sm501_dbg_regs(struct device *dev,
struct device_attribute *attr, char *buff)
{
struct sm501_devdata *sm = dev_get_drvdata(dev);
unsigned int reg;
char *ptr = buff;
int ret;
for (reg = 0x00; reg < 0x70; reg += 4) {
ret = sprintf(ptr, "%08x = %08x\n",
reg, smc501_readl(sm->regs + reg));
ptr += ret;
}
return ptr - buff;
}
static DEVICE_ATTR(dbg_regs, 0444, sm501_dbg_regs, NULL);
/* sm501_init_reg
*
* Helper function for the init code to setup a register
*
* clear the bits which are set in r->mask, and then set
* the bits set in r->set.
*/
static inline void sm501_init_reg(struct sm501_devdata *sm,
unsigned long reg,
struct sm501_reg_init *r)
{
unsigned long tmp;
tmp = smc501_readl(sm->regs + reg);
tmp &= ~r->mask;
tmp |= r->set;
smc501_writel(tmp, sm->regs + reg);
}
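/* worked example (illustrative values): with r->mask = 0x1F1F00 and
* r->set = 0x010100, as in the PCI misc_timing initdata below, a register
* reading 0xAA1E1EAA becomes
* (0xAA1E1EAA & ~0x1F1F00) | 0x010100 = 0xAA0101AA:
* the masked field is cleared first, then the new bits are OR-ed in. */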
/* sm501_init_regs
*
* Setup core register values
*/
static void sm501_init_regs(struct sm501_devdata *sm,
struct sm501_initdata *init)
{
sm501_misc_control(sm->dev,
init->misc_control.set,
init->misc_control.mask);
sm501_init_reg(sm, SM501_MISC_TIMING, &init->misc_timing);
sm501_init_reg(sm, SM501_GPIO31_0_CONTROL, &init->gpio_low);
sm501_init_reg(sm, SM501_GPIO63_32_CONTROL, &init->gpio_high);
if (init->m1xclk) {
dev_info(sm->dev, "setting M1XCLK to %ld\n", init->m1xclk);
sm501_set_clock(sm->dev, SM501_CLOCK_M1XCLK, init->m1xclk);
}
if (init->mclk) {
dev_info(sm->dev, "setting MCLK to %ld\n", init->mclk);
sm501_set_clock(sm->dev, SM501_CLOCK_MCLK, init->mclk);
}
}
/* Check the PLL sources for the M1CLK and M1XCLK
*
* If the M1CLK and M1XCLKs are not sourced from the same PLL, then
* there is a risk (see errata AB-5) that the SM501 will cease proper
* function. If this happens, then it is likely the SM501 will
* hang the system.
*/
static int sm501_check_clocks(struct sm501_devdata *sm)
{
unsigned long pwrmode = smc501_readl(sm->regs + SM501_CURRENT_CLOCK);
unsigned long msrc = (pwrmode & SM501_POWERMODE_M_SRC);
unsigned long m1src = (pwrmode & SM501_POWERMODE_M1_SRC);
return ((msrc == 0 && m1src != 0) || (msrc != 0 && m1src == 0));
}
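/* put another way (illustrative): the check only trips when exactly one
* of the two source bits is set, i.e. M and M1 are fed from different
* PLLs; both-clear (both PLL0) and both-set (both PLL1) are fine. */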
static unsigned int sm501_mem_local[] = {
[0] = 4*1024*1024,
[1] = 8*1024*1024,
[2] = 16*1024*1024,
[3] = 32*1024*1024,
[4] = 64*1024*1024,
[5] = 2*1024*1024,
};
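/* illustrative decode (restating the use below): bits 15..13 of
* SM501_DRAM_CONTROL index this table, so a dramctrl value with those
* bits reading 3 selects 32MB of local memory:
*
* mem_avail = sm501_mem_local[(dramctrl >> 13) & 0x7];
*/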
/* sm501_init_dev
*
* Common init code for an SM501
*/
static int sm501_init_dev(struct sm501_devdata *sm)
{
struct sm501_initdata *idata;
struct sm501_platdata *pdata;
resource_size_t mem_avail;
unsigned long dramctrl;
unsigned long devid;
int ret;
mutex_init(&sm->clock_lock);
spin_lock_init(&sm->reg_lock);
INIT_LIST_HEAD(&sm->devices);
devid = smc501_readl(sm->regs + SM501_DEVICEID);
if ((devid & SM501_DEVICEID_IDMASK) != SM501_DEVICEID_SM501) {
dev_err(sm->dev, "incorrect device id %08lx\n", devid);
return -EINVAL;
}
/* disable irqs */
smc501_writel(0, sm->regs + SM501_IRQ_MASK);
dramctrl = smc501_readl(sm->regs + SM501_DRAM_CONTROL);
mem_avail = sm501_mem_local[(dramctrl >> 13) & 0x7];
dev_info(sm->dev, "SM501 At %p: Version %08lx, %ld Mb, IRQ %d\n",
sm->regs, devid, (unsigned long)mem_avail >> 20, sm->irq);
sm->rev = devid & SM501_DEVICEID_REVMASK;
sm501_dump_gate(sm);
ret = device_create_file(sm->dev, &dev_attr_dbg_regs);
if (ret)
dev_err(sm->dev, "failed to create debug regs file\n");
sm501_dump_clk(sm);
/* check to see if we have some device initialisation */
pdata = sm->platdata;
idata = pdata ? pdata->init : NULL;
if (idata) {
sm501_init_regs(sm, idata);
if (idata->devices & SM501_USE_USB_HOST)
sm501_register_usbhost(sm, &mem_avail);
if (idata->devices & (SM501_USE_UART0 | SM501_USE_UART1))
sm501_register_uart(sm, idata->devices);
if (idata->devices & SM501_USE_GPIO)
sm501_register_gpio(sm);
}
if (pdata && pdata->gpio_i2c != NULL && pdata->gpio_i2c_nr > 0) {
if (!sm501_gpio_isregistered(sm))
dev_err(sm->dev, "no gpio available for i2c gpio.\n");
else
sm501_register_gpio_i2c(sm, pdata);
}
ret = sm501_check_clocks(sm);
if (ret) {
dev_err(sm->dev, "M1X and M clocks sourced from different "
"PLLs\n");
return -EINVAL;
}
/* always create a framebuffer */
sm501_register_display(sm, &mem_avail);
return 0;
}
static int sm501_plat_probe(struct platform_device *dev)
{
struct sm501_devdata *sm;
int ret;
sm = kzalloc(sizeof(struct sm501_devdata), GFP_KERNEL);
if (sm == NULL) {
dev_err(&dev->dev, "no memory for device data\n");
ret = -ENOMEM;
goto err1;
}
sm->dev = &dev->dev;
sm->pdev_id = dev->id;
sm->platdata = dev->dev.platform_data;
ret = platform_get_irq(dev, 0);
if (ret < 0) {
dev_err(&dev->dev, "failed to get irq resource\n");
goto err_res;
}
sm->irq = ret;
sm->io_res = platform_get_resource(dev, IORESOURCE_MEM, 1);
sm->mem_res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (sm->io_res == NULL || sm->mem_res == NULL) {
dev_err(&dev->dev, "failed to get IO resource\n");
ret = -ENOENT;
goto err_res;
}
sm->regs_claim = request_mem_region(sm->io_res->start,
0x100, "sm501");
if (sm->regs_claim == NULL) {
dev_err(&dev->dev, "cannot claim registers\n");
ret = -EBUSY;
goto err_res;
}
platform_set_drvdata(dev, sm);
sm->regs = ioremap(sm->io_res->start, resource_size(sm->io_res));
if (sm->regs == NULL) {
dev_err(&dev->dev, "cannot remap registers\n");
ret = -EIO;
goto err_claim;
}
return sm501_init_dev(sm);
err_claim:
release_resource(sm->regs_claim);
kfree(sm->regs_claim);
err_res:
kfree(sm);
err1:
return ret;
}
#ifdef CONFIG_PM
/* power management support */
static void sm501_set_power(struct sm501_devdata *sm, int on)
{
struct sm501_platdata *pd = sm->platdata;
if (pd == NULL)
return;
if (pd->get_power) {
if (pd->get_power(sm->dev) == on) {
dev_dbg(sm->dev, "is already %d\n", on);
return;
}
}
if (pd->set_power) {
dev_dbg(sm->dev, "setting power to %d\n", on);
pd->set_power(sm->dev, on);
sm501_mdelay(sm, 10);
}
}
static int sm501_plat_suspend(struct platform_device *pdev, pm_message_t state)
{
struct sm501_devdata *sm = platform_get_drvdata(pdev);
sm->in_suspend = 1;
sm->pm_misc = smc501_readl(sm->regs + SM501_MISC_CONTROL);
sm501_dump_regs(sm);
if (sm->platdata) {
if (sm->platdata->flags & SM501_FLAG_SUSPEND_OFF)
sm501_set_power(sm, 0);
}
return 0;
}
static int sm501_plat_resume(struct platform_device *pdev)
{
struct sm501_devdata *sm = platform_get_drvdata(pdev);
sm501_set_power(sm, 1);
sm501_dump_regs(sm);
sm501_dump_gate(sm);
sm501_dump_clk(sm);
/* check to see if we are in the same state as when suspended */
if (smc501_readl(sm->regs + SM501_MISC_CONTROL) != sm->pm_misc) {
dev_info(sm->dev, "SM501_MISC_CONTROL changed over sleep\n");
smc501_writel(sm->pm_misc, sm->regs + SM501_MISC_CONTROL);
/* our suspend causes the controller state to change,
* either by something attempting setup, power loss,
* or an external reset event on power change */
if (sm->platdata && sm->platdata->init) {
sm501_init_regs(sm, sm->platdata->init);
}
}
/* dump our state from resume */
sm501_dump_regs(sm);
sm501_dump_clk(sm);
sm->in_suspend = 0;
return 0;
}
#else
#define sm501_plat_suspend NULL
#define sm501_plat_resume NULL
#endif
/* Initialisation data for PCI devices */
static struct sm501_initdata sm501_pci_initdata = {
.gpio_high = {
.set = 0x3F000000, /* 24bit panel */
.mask = 0x0,
},
.misc_timing = {
.set = 0x010100, /* SDRAM timing */
.mask = 0x1F1F00,
},
.misc_control = {
.set = SM501_MISC_PNL_24BIT,
.mask = 0,
},
.devices = SM501_USE_ALL,
/* Errata AB-3 says that 72MHz is the fastest available
* for 33MHz PCI with proper bus-mastering operation */
.mclk = 72 * MHZ,
.m1xclk = 144 * MHZ,
};
static struct sm501_platdata_fbsub sm501_pdata_fbsub = {
.flags = (SM501FB_FLAG_USE_INIT_MODE |
SM501FB_FLAG_USE_HWCURSOR |
SM501FB_FLAG_USE_HWACCEL |
SM501FB_FLAG_DISABLE_AT_EXIT),
};
static struct sm501_platdata_fb sm501_fb_pdata = {
.fb_route = SM501_FB_OWN,
.fb_crt = &sm501_pdata_fbsub,
.fb_pnl = &sm501_pdata_fbsub,
};
static struct sm501_platdata sm501_pci_platdata = {
.init = &sm501_pci_initdata,
.fb = &sm501_fb_pdata,
.gpio_base = -1,
};
static int sm501_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct sm501_devdata *sm;
int err;
sm = kzalloc(sizeof(struct sm501_devdata), GFP_KERNEL);
if (sm == NULL) {
dev_err(&dev->dev, "no memory for device data\n");
err = -ENOMEM;
goto err1;
}
/* set a default set of platform data */
dev->dev.platform_data = sm->platdata = &sm501_pci_platdata;
/* set a hopefully unique id for our child platform devices */
sm->pdev_id = 32 + dev->devfn;
pci_set_drvdata(dev, sm);
err = pci_enable_device(dev);
if (err) {
dev_err(&dev->dev, "cannot enable device\n");
goto err2;
}
sm->dev = &dev->dev;
sm->irq = dev->irq;
#ifdef __BIG_ENDIAN
/* if the system is big-endian, we most probably have a
* translation in the IO layer making the PCI bus little endian,
* so tell the framebuffer to swap its pixel endianness */
sm501_fb_pdata.flags |= SM501_FBPD_SWAP_FB_ENDIAN;
#endif
/* check our resources */
if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM)) {
dev_err(&dev->dev, "region #0 is not memory?\n");
err = -EINVAL;
goto err3;
}
if (!(pci_resource_flags(dev, 1) & IORESOURCE_MEM)) {
dev_err(&dev->dev, "region #1 is not memory?\n");
err = -EINVAL;
goto err3;
}
/* make our resources ready for sharing */
sm->io_res = &dev->resource[1];
sm->mem_res = &dev->resource[0];
sm->regs_claim = request_mem_region(sm->io_res->start,
0x100, "sm501");
if (sm->regs_claim == NULL) {
dev_err(&dev->dev, "cannot claim registers\n");
err = -EBUSY;
goto err3;
}
sm->regs = pci_ioremap_bar(dev, 1);
if (sm->regs == NULL) {
dev_err(&dev->dev, "cannot remap registers\n");
err = -EIO;
goto err4;
}
sm501_init_dev(sm);
return 0;
err4:
release_resource(sm->regs_claim);
kfree(sm->regs_claim);
err3:
pci_disable_device(dev);
err2:
pci_set_drvdata(dev, NULL);
kfree(sm);
err1:
return err;
}
static void sm501_remove_sub(struct sm501_devdata *sm,
struct sm501_device *smdev)
{
list_del(&smdev->list);
platform_device_unregister(&smdev->pdev);
}
static void sm501_dev_remove(struct sm501_devdata *sm)
{
struct sm501_device *smdev, *tmp;
list_for_each_entry_safe(smdev, tmp, &sm->devices, list)
sm501_remove_sub(sm, smdev);
device_remove_file(sm->dev, &dev_attr_dbg_regs);
sm501_gpio_remove(sm);
}
static void sm501_pci_remove(struct pci_dev *dev)
{
struct sm501_devdata *sm = pci_get_drvdata(dev);
sm501_dev_remove(sm);
iounmap(sm->regs);
release_resource(sm->regs_claim);
kfree(sm->regs_claim);
pci_set_drvdata(dev, NULL);
pci_disable_device(dev);
}
static int sm501_plat_remove(struct platform_device *dev)
{
struct sm501_devdata *sm = platform_get_drvdata(dev);
sm501_dev_remove(sm);
iounmap(sm->regs);
release_resource(sm->regs_claim);
kfree(sm->regs_claim);
return 0;
}
static DEFINE_PCI_DEVICE_TABLE(sm501_pci_tbl) = {
{ 0x126f, 0x0501, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, sm501_pci_tbl);
static struct pci_driver sm501_pci_driver = {
.name = "sm501",
.id_table = sm501_pci_tbl,
.probe = sm501_pci_probe,
.remove = sm501_pci_remove,
};
MODULE_ALIAS("platform:sm501");
static struct of_device_id of_sm501_match_tbl[] = {
{ .compatible = "smi,sm501", },
{ /* end */ }
};
static struct platform_driver sm501_plat_driver = {
.driver = {
.name = "sm501",
.owner = THIS_MODULE,
.of_match_table = of_sm501_match_tbl,
},
.probe = sm501_plat_probe,
.remove = sm501_plat_remove,
.suspend = sm501_plat_suspend,
.resume = sm501_plat_resume,
};
static int __init sm501_base_init(void)
{
platform_driver_register(&sm501_plat_driver);
return pci_register_driver(&sm501_pci_driver);
}
static void __exit sm501_base_exit(void)
{
platform_driver_unregister(&sm501_plat_driver);
pci_unregister_driver(&sm501_pci_driver);
}
module_init(sm501_base_init);
module_exit(sm501_base_exit);
MODULE_DESCRIPTION("SM501 Core Driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>, Vincent Sanders");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
jcadduono/android_kernel_oneplus_msm8996 | arch/powerpc/platforms/powermac/pfunc_core.c | 1922 | 25629 | /*
*
* FIXME: Properly make this race free with refcounting etc...
*
* FIXME: LOCKING !!!
*/
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <asm/prom.h>
#include <asm/pmac_pfunc.h>
/* Debug */
#define LOG_PARSE(fmt...)
#define LOG_ERROR(fmt...) printk(fmt)
#define LOG_BLOB(t,b,c)
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif
/* Command numbers */
#define PMF_CMD_LIST 0
#define PMF_CMD_WRITE_GPIO 1
#define PMF_CMD_READ_GPIO 2
#define PMF_CMD_WRITE_REG32 3
#define PMF_CMD_READ_REG32 4
#define PMF_CMD_WRITE_REG16 5
#define PMF_CMD_READ_REG16 6
#define PMF_CMD_WRITE_REG8 7
#define PMF_CMD_READ_REG8 8
#define PMF_CMD_DELAY 9
#define PMF_CMD_WAIT_REG32 10
#define PMF_CMD_WAIT_REG16 11
#define PMF_CMD_WAIT_REG8 12
#define PMF_CMD_READ_I2C 13
#define PMF_CMD_WRITE_I2C 14
#define PMF_CMD_RMW_I2C 15
#define PMF_CMD_GEN_I2C 16
#define PMF_CMD_SHIFT_BYTES_RIGHT 17
#define PMF_CMD_SHIFT_BYTES_LEFT 18
#define PMF_CMD_READ_CFG 19
#define PMF_CMD_WRITE_CFG 20
#define PMF_CMD_RMW_CFG 21
#define PMF_CMD_READ_I2C_SUBADDR 22
#define PMF_CMD_WRITE_I2C_SUBADDR 23
#define PMF_CMD_SET_I2C_MODE 24
#define PMF_CMD_RMW_I2C_SUBADDR 25
#define PMF_CMD_READ_REG32_MASK_SHR_XOR 26
#define PMF_CMD_READ_REG16_MASK_SHR_XOR 27
#define PMF_CMD_READ_REG8_MASK_SHR_XOR 28
#define PMF_CMD_WRITE_REG32_SHL_MASK 29
#define PMF_CMD_WRITE_REG16_SHL_MASK 30
#define PMF_CMD_WRITE_REG8_SHL_MASK 31
#define PMF_CMD_MASK_AND_COMPARE 32
#define PMF_CMD_COUNT 33
/* This structure holds the state of the parser while walking through
* a function definition
*/
struct pmf_cmd {
const void *cmdptr;
const void *cmdend;
struct pmf_function *func;
void *instdata;
struct pmf_args *args;
int error;
};
#if 0
/* Debug output */
static void print_blob(const char *title, const void *blob, int bytes)
{
printk("%s", title);
while(bytes--) {
printk("%02x ", *((u8 *)blob));
blob += 1;
}
printk("\n");
}
#endif
/*
* Parser helpers
*/
static u32 pmf_next32(struct pmf_cmd *cmd)
{
u32 value;
if ((cmd->cmdend - cmd->cmdptr) < 4) {
cmd->error = 1;
return 0;
}
value = *((u32 *)cmd->cmdptr);
cmd->cmdptr += 4;
return value;
}
static const void* pmf_next_blob(struct pmf_cmd *cmd, int count)
{
const void *value;
if ((cmd->cmdend - cmd->cmdptr) < count) {
cmd->error = 1;
return NULL;
}
value = cmd->cmdptr;
cmd->cmdptr += count;
return value;
}
/*
* Individual command parsers
*/
#define PMF_PARSE_CALL(name, cmd, handlers, p...) \
do { \
if (cmd->error) \
return -ENXIO; \
if (handlers == NULL) \
return 0; \
if (handlers->name) \
return handlers->name(cmd->func, cmd->instdata, \
cmd->args, p); \
return -1; \
} while(0)
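/* illustrative expansion (not in the original source): for
* pmf_parser_write_gpio() below, PMF_PARSE_CALL(write_gpio, cmd, h,
* value, mask) expands to roughly:
*
* if (cmd->error)
* return -ENXIO;
* if (h == NULL)
* return 0; (parse-only pass, no handlers)
* if (h->write_gpio)
* return h->write_gpio(cmd->func, cmd->instdata, cmd->args,
* value, mask);
* return -1;
*
* so passing a NULL handler table is how pmf_parse_one() performs its
* initial sizing pass without executing anything. */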
static int pmf_parser_write_gpio(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u8 value = (u8)pmf_next32(cmd);
u8 mask = (u8)pmf_next32(cmd);
LOG_PARSE("pmf: write_gpio(value: %02x, mask: %02x)\n", value, mask);
PMF_PARSE_CALL(write_gpio, cmd, h, value, mask);
}
static int pmf_parser_read_gpio(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u8 mask = (u8)pmf_next32(cmd);
int rshift = (int)pmf_next32(cmd);
u8 xor = (u8)pmf_next32(cmd);
LOG_PARSE("pmf: read_gpio(mask: %02x, rshift: %d, xor: %02x)\n",
mask, rshift, xor);
PMF_PARSE_CALL(read_gpio, cmd, h, mask, rshift, xor);
}
static int pmf_parser_write_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 value = pmf_next32(cmd);
u32 mask = pmf_next32(cmd);
LOG_PARSE("pmf: write_reg32(offset: %08x, value: %08x, mask: %08x)\n",
offset, value, mask);
PMF_PARSE_CALL(write_reg32, cmd, h, offset, value, mask);
}
static int pmf_parser_read_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
LOG_PARSE("pmf: read_reg32(offset: %08x)\n", offset);
PMF_PARSE_CALL(read_reg32, cmd, h, offset);
}
static int pmf_parser_write_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u16 value = (u16)pmf_next32(cmd);
u16 mask = (u16)pmf_next32(cmd);
LOG_PARSE("pmf: write_reg16(offset: %08x, value: %04x, mask: %04x)\n",
offset, value, mask);
PMF_PARSE_CALL(write_reg16, cmd, h, offset, value, mask);
}
static int pmf_parser_read_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
LOG_PARSE("pmf: read_reg16(offset: %08x)\n", offset);
PMF_PARSE_CALL(read_reg16, cmd, h, offset);
}
static int pmf_parser_write_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u8 value = (u8)pmf_next32(cmd);
u8 mask = (u8)pmf_next32(cmd);
LOG_PARSE("pmf: write_reg8(offset: %08x, value: %02x, mask: %02x)\n",
offset, value, mask);
PMF_PARSE_CALL(write_reg8, cmd, h, offset, value, mask);
}
static int pmf_parser_read_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
LOG_PARSE("pmf: read_reg8(offset: %08x)\n", offset);
PMF_PARSE_CALL(read_reg8, cmd, h, offset);
}
static int pmf_parser_delay(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 duration = pmf_next32(cmd);
LOG_PARSE("pmf: delay(duration: %d us)\n", duration);
PMF_PARSE_CALL(delay, cmd, h, duration);
}
static int pmf_parser_wait_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 value = pmf_next32(cmd);
u32 mask = pmf_next32(cmd);
LOG_PARSE("pmf: wait_reg32(offset: %08x, comp_value: %08x,mask: %08x)\n",
offset, value, mask);
PMF_PARSE_CALL(wait_reg32, cmd, h, offset, value, mask);
}
static int pmf_parser_wait_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u16 value = (u16)pmf_next32(cmd);
u16 mask = (u16)pmf_next32(cmd);
LOG_PARSE("pmf: wait_reg16(offset: %08x, comp_value: %04x,mask: %04x)\n",
offset, value, mask);
PMF_PARSE_CALL(wait_reg16, cmd, h, offset, value, mask);
}
static int pmf_parser_wait_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u8 value = (u8)pmf_next32(cmd);
u8 mask = (u8)pmf_next32(cmd);
LOG_PARSE("pmf: wait_reg8(offset: %08x, comp_value: %02x,mask: %02x)\n",
offset, value, mask);
PMF_PARSE_CALL(wait_reg8, cmd, h, offset, value, mask);
}
static int pmf_parser_read_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 bytes = pmf_next32(cmd);
LOG_PARSE("pmf: read_i2c(bytes: %ud)\n", bytes);
PMF_PARSE_CALL(read_i2c, cmd, h, bytes);
}
static int pmf_parser_write_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 bytes = pmf_next32(cmd);
const void *blob = pmf_next_blob(cmd, bytes);
LOG_PARSE("pmf: write_i2c(bytes: %ud) ...\n", bytes);
LOG_BLOB("pmf: data: \n", blob, bytes);
PMF_PARSE_CALL(write_i2c, cmd, h, bytes, blob);
}
static int pmf_parser_rmw_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 maskbytes = pmf_next32(cmd);
u32 valuesbytes = pmf_next32(cmd);
u32 totalbytes = pmf_next32(cmd);
const void *maskblob = pmf_next_blob(cmd, maskbytes);
const void *valuesblob = pmf_next_blob(cmd, valuesbytes);
LOG_PARSE("pmf: rmw_i2c(maskbytes: %ud, valuebytes: %ud, "
"totalbytes: %d) ...\n",
maskbytes, valuesbytes, totalbytes);
LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes);
LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes);
PMF_PARSE_CALL(rmw_i2c, cmd, h, maskbytes, valuesbytes, totalbytes,
maskblob, valuesblob);
}
static int pmf_parser_read_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 bytes = pmf_next32(cmd);
LOG_PARSE("pmf: read_cfg(offset: %x, bytes: %ud)\n", offset, bytes);
PMF_PARSE_CALL(read_cfg, cmd, h, offset, bytes);
}
static int pmf_parser_write_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 bytes = pmf_next32(cmd);
const void *blob = pmf_next_blob(cmd, bytes);
LOG_PARSE("pmf: write_cfg(offset: %x, bytes: %ud)\n", offset, bytes);
LOG_BLOB("pmf: data: \n", blob, bytes);
PMF_PARSE_CALL(write_cfg, cmd, h, offset, bytes, blob);
}
static int pmf_parser_rmw_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 maskbytes = pmf_next32(cmd);
u32 valuesbytes = pmf_next32(cmd);
u32 totalbytes = pmf_next32(cmd);
const void *maskblob = pmf_next_blob(cmd, maskbytes);
const void *valuesblob = pmf_next_blob(cmd, valuesbytes);
LOG_PARSE("pmf: rmw_cfg(maskbytes: %ud, valuebytes: %ud,"
" totalbytes: %d) ...\n",
maskbytes, valuesbytes, totalbytes);
LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes);
LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes);
PMF_PARSE_CALL(rmw_cfg, cmd, h, offset, maskbytes, valuesbytes,
totalbytes, maskblob, valuesblob);
}
static int pmf_parser_read_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u8 subaddr = (u8)pmf_next32(cmd);
u32 bytes = pmf_next32(cmd);
LOG_PARSE("pmf: read_i2c_sub(subaddr: %x, bytes: %ud)\n",
subaddr, bytes);
PMF_PARSE_CALL(read_i2c_sub, cmd, h, subaddr, bytes);
}
static int pmf_parser_write_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u8 subaddr = (u8)pmf_next32(cmd);
u32 bytes = pmf_next32(cmd);
const void *blob = pmf_next_blob(cmd, bytes);
LOG_PARSE("pmf: write_i2c_sub(subaddr: %x, bytes: %ud) ...\n",
subaddr, bytes);
LOG_BLOB("pmf: data: \n", blob, bytes);
PMF_PARSE_CALL(write_i2c_sub, cmd, h, subaddr, bytes, blob);
}
static int pmf_parser_set_i2c_mode(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 mode = pmf_next32(cmd);
LOG_PARSE("pmf: set_i2c_mode(mode: %d)\n", mode);
PMF_PARSE_CALL(set_i2c_mode, cmd, h, mode);
}
static int pmf_parser_rmw_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u8 subaddr = (u8)pmf_next32(cmd);
u32 maskbytes = pmf_next32(cmd);
u32 valuesbytes = pmf_next32(cmd);
u32 totalbytes = pmf_next32(cmd);
const void *maskblob = pmf_next_blob(cmd, maskbytes);
const void *valuesblob = pmf_next_blob(cmd, valuesbytes);
LOG_PARSE("pmf: rmw_i2c_sub(subaddr: %x, maskbytes: %ud, valuebytes: %ud"
", totalbytes: %d) ...\n",
subaddr, maskbytes, valuesbytes, totalbytes);
LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes);
LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes);
PMF_PARSE_CALL(rmw_i2c_sub, cmd, h, subaddr, maskbytes, valuesbytes,
totalbytes, maskblob, valuesblob);
}
static int pmf_parser_read_reg32_msrx(struct pmf_cmd *cmd,
struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 mask = pmf_next32(cmd);
u32 shift = pmf_next32(cmd);
u32 xor = pmf_next32(cmd);
LOG_PARSE("pmf: read_reg32_msrx(offset: %x, mask: %x, shift: %x,"
" xor: %x\n", offset, mask, shift, xor);
PMF_PARSE_CALL(read_reg32_msrx, cmd, h, offset, mask, shift, xor);
}
static int pmf_parser_read_reg16_msrx(struct pmf_cmd *cmd,
struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 mask = pmf_next32(cmd);
u32 shift = pmf_next32(cmd);
u32 xor = pmf_next32(cmd);
LOG_PARSE("pmf: read_reg16_msrx(offset: %x, mask: %x, shift: %x,"
" xor: %x\n", offset, mask, shift, xor);
PMF_PARSE_CALL(read_reg16_msrx, cmd, h, offset, mask, shift, xor);
}
static int pmf_parser_read_reg8_msrx(struct pmf_cmd *cmd,
struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 mask = pmf_next32(cmd);
u32 shift = pmf_next32(cmd);
u32 xor = pmf_next32(cmd);
LOG_PARSE("pmf: read_reg8_msrx(offset: %x, mask: %x, shift: %x,"
" xor: %x\n", offset, mask, shift, xor);
PMF_PARSE_CALL(read_reg8_msrx, cmd, h, offset, mask, shift, xor);
}
static int pmf_parser_write_reg32_slm(struct pmf_cmd *cmd,
struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 shift = pmf_next32(cmd);
u32 mask = pmf_next32(cmd);
LOG_PARSE("pmf: write_reg32_slm(offset: %x, shift: %x, mask: %x\n",
offset, shift, mask);
PMF_PARSE_CALL(write_reg32_slm, cmd, h, offset, shift, mask);
}
static int pmf_parser_write_reg16_slm(struct pmf_cmd *cmd,
struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 shift = pmf_next32(cmd);
u32 mask = pmf_next32(cmd);
LOG_PARSE("pmf: write_reg16_slm(offset: %x, shift: %x, mask: %x\n",
offset, shift, mask);
PMF_PARSE_CALL(write_reg16_slm, cmd, h, offset, shift, mask);
}
static int pmf_parser_write_reg8_slm(struct pmf_cmd *cmd,
struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 shift = pmf_next32(cmd);
u32 mask = pmf_next32(cmd);
LOG_PARSE("pmf: write_reg8_slm(offset: %x, shift: %x, mask: %x\n",
offset, shift, mask);
PMF_PARSE_CALL(write_reg8_slm, cmd, h, offset, shift, mask);
}
static int pmf_parser_mask_and_compare(struct pmf_cmd *cmd,
struct pmf_handlers *h)
{
u32 bytes = pmf_next32(cmd);
const void *maskblob = pmf_next_blob(cmd, bytes);
const void *valuesblob = pmf_next_blob(cmd, bytes);
LOG_PARSE("pmf: mask_and_compare(length: %ud ...\n", bytes);
LOG_BLOB("pmf: mask data: \n", maskblob, bytes);
LOG_BLOB("pmf: values data: \n", valuesblob, bytes);
PMF_PARSE_CALL(mask_and_compare, cmd, h,
bytes, maskblob, valuesblob);
}
typedef int (*pmf_cmd_parser_t)(struct pmf_cmd *cmd, struct pmf_handlers *h);
static pmf_cmd_parser_t pmf_parsers[PMF_CMD_COUNT] =
{
NULL,
pmf_parser_write_gpio,
pmf_parser_read_gpio,
pmf_parser_write_reg32,
pmf_parser_read_reg32,
pmf_parser_write_reg16,
pmf_parser_read_reg16,
pmf_parser_write_reg8,
pmf_parser_read_reg8,
pmf_parser_delay,
pmf_parser_wait_reg32,
pmf_parser_wait_reg16,
pmf_parser_wait_reg8,
pmf_parser_read_i2c,
pmf_parser_write_i2c,
pmf_parser_rmw_i2c,
NULL, /* Bogus command */
NULL, /* Shift bytes right: NYI */
NULL, /* Shift bytes left: NYI */
pmf_parser_read_cfg,
pmf_parser_write_cfg,
pmf_parser_rmw_cfg,
pmf_parser_read_i2c_sub,
pmf_parser_write_i2c_sub,
pmf_parser_set_i2c_mode,
pmf_parser_rmw_i2c_sub,
pmf_parser_read_reg32_msrx,
pmf_parser_read_reg16_msrx,
pmf_parser_read_reg8_msrx,
pmf_parser_write_reg32_slm,
pmf_parser_write_reg16_slm,
pmf_parser_write_reg8_slm,
pmf_parser_mask_and_compare,
};
struct pmf_device {
struct list_head link;
struct device_node *node;
struct pmf_handlers *handlers;
struct list_head functions;
struct kref ref;
};
static LIST_HEAD(pmf_devices);
static DEFINE_SPINLOCK(pmf_lock);
static DEFINE_MUTEX(pmf_irq_mutex);
static void pmf_release_device(struct kref *kref)
{
struct pmf_device *dev = container_of(kref, struct pmf_device, ref);
kfree(dev);
}
static inline void pmf_put_device(struct pmf_device *dev)
{
kref_put(&dev->ref, pmf_release_device);
}
static inline struct pmf_device *pmf_get_device(struct pmf_device *dev)
{
kref_get(&dev->ref);
return dev;
}
static inline struct pmf_device *pmf_find_device(struct device_node *np)
{
struct pmf_device *dev;
list_for_each_entry(dev, &pmf_devices, link) {
if (dev->node == np)
return pmf_get_device(dev);
}
return NULL;
}
static int pmf_parse_one(struct pmf_function *func,
struct pmf_handlers *handlers,
void *instdata, struct pmf_args *args)
{
struct pmf_cmd cmd;
u32 ccode;
int count, rc;
cmd.cmdptr = func->data;
cmd.cmdend = func->data + func->length;
cmd.func = func;
cmd.instdata = instdata;
cmd.args = args;
cmd.error = 0;
LOG_PARSE("pmf: func %s, %d bytes, %s...\n",
func->name, func->length,
handlers ? "executing" : "parsing");
/* One subcommand to parse for now */
count = 1;
while(count-- && cmd.cmdptr < cmd.cmdend) {
/* Get opcode */
ccode = pmf_next32(&cmd);
/* Check if we are hitting a command list, fetch new count */
if (ccode == 0) {
count = pmf_next32(&cmd) - 1;
ccode = pmf_next32(&cmd);
}
if (cmd.error) {
LOG_ERROR("pmf: parse error, not enough data\n");
return -ENXIO;
}
if (ccode >= PMF_CMD_COUNT) {
LOG_ERROR("pmf: command code %d unknown !\n", ccode);
return -ENXIO;
}
if (pmf_parsers[ccode] == NULL) {
LOG_ERROR("pmf: no parser for command %d !\n", ccode);
return -ENXIO;
}
rc = pmf_parsers[ccode](&cmd, handlers);
if (rc != 0) {
LOG_ERROR("pmf: parser for command %d returned"
" error %d\n", ccode, rc);
return rc;
}
}
/* We are doing an initial parse pass, we need to adjust the size */
if (handlers == NULL)
func->length = cmd.cmdptr - func->data;
return 0;
}
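/* illustrative stream layout (example values only): a function blob
* beginning with a LIST command, e.g.
*
* 00 00 00 00 PMF_CMD_LIST
* 00 00 00 02 count = 2 subcommands
* 00 00 00 09 PMF_CMD_DELAY
* 00 00 27 10 10000us
* 00 00 00 01 PMF_CMD_WRITE_GPIO
* ... value and mask words ...
*
* takes the "fetch new count" branch above: count is read as 2 and the
* two subcommands (the DELAY, then the WRITE_GPIO) are dispatched in
* order. */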
static int pmf_add_function_prop(struct pmf_device *dev, void *driverdata,
const char *name, u32 *data,
unsigned int length)
{
int count = 0;
struct pmf_function *func = NULL;
DBG("pmf: Adding functions for platform-do-%s\n", name);
while (length >= 12) {
/* Allocate a structure */
func = kzalloc(sizeof(struct pmf_function), GFP_KERNEL);
if (func == NULL)
goto bail;
kref_init(&func->ref);
INIT_LIST_HEAD(&func->irq_clients);
func->node = dev->node;
func->driver_data = driverdata;
func->name = name;
func->phandle = data[0];
func->flags = data[1];
data += 2;
length -= 8;
func->data = data;
func->length = length;
func->dev = dev;
DBG("pmf: idx %d: flags=%08x, phandle=%08x "
" %d bytes remaining, parsing...\n",
count+1, func->flags, func->phandle, length);
if (pmf_parse_one(func, NULL, NULL, NULL)) {
kfree(func);
goto bail;
}
length -= func->length;
data = (u32 *)(((u8 *)data) + func->length);
list_add(&func->link, &dev->functions);
pmf_get_device(dev);
count++;
}
bail:
DBG("pmf: Added %d functions\n", count);
return count;
}
static int pmf_add_functions(struct pmf_device *dev, void *driverdata)
{
struct property *pp;
#define PP_PREFIX "platform-do-"
const int plen = strlen(PP_PREFIX);
int count = 0;
for (pp = dev->node->properties; pp != 0; pp = pp->next) {
const char *name;
if (strncmp(pp->name, PP_PREFIX, plen) != 0)
continue;
name = pp->name + plen;
if (strlen(name) && pp->length >= 12)
count += pmf_add_function_prop(dev, driverdata, name,
pp->value, pp->length);
}
return count;
}
int pmf_register_driver(struct device_node *np,
struct pmf_handlers *handlers,
void *driverdata)
{
struct pmf_device *dev;
unsigned long flags;
int rc = 0;
if (handlers == NULL)
return -EINVAL;
DBG("pmf: registering driver for node %s\n", np->full_name);
spin_lock_irqsave(&pmf_lock, flags);
dev = pmf_find_device(np);
spin_unlock_irqrestore(&pmf_lock, flags);
if (dev != NULL) {
DBG("pmf: already there !\n");
pmf_put_device(dev);
return -EBUSY;
}
dev = kzalloc(sizeof(struct pmf_device), GFP_KERNEL);
if (dev == NULL) {
DBG("pmf: no memory !\n");
return -ENOMEM;
}
kref_init(&dev->ref);
dev->node = of_node_get(np);
dev->handlers = handlers;
INIT_LIST_HEAD(&dev->functions);
rc = pmf_add_functions(dev, driverdata);
if (rc == 0) {
DBG("pmf: no functions, disposing.. \n");
of_node_put(np);
kfree(dev);
return -ENODEV;
}
spin_lock_irqsave(&pmf_lock, flags);
list_add(&dev->link, &pmf_devices);
spin_unlock_irqrestore(&pmf_lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(pmf_register_driver);
struct pmf_function *pmf_get_function(struct pmf_function *func)
{
if (!try_module_get(func->dev->handlers->owner))
return NULL;
kref_get(&func->ref);
return func;
}
EXPORT_SYMBOL_GPL(pmf_get_function);
static void pmf_release_function(struct kref *kref)
{
struct pmf_function *func =
container_of(kref, struct pmf_function, ref);
pmf_put_device(func->dev);
kfree(func);
}
static inline void __pmf_put_function(struct pmf_function *func)
{
kref_put(&func->ref, pmf_release_function);
}
void pmf_put_function(struct pmf_function *func)
{
if (func == NULL)
return;
module_put(func->dev->handlers->owner);
__pmf_put_function(func);
}
EXPORT_SYMBOL_GPL(pmf_put_function);
void pmf_unregister_driver(struct device_node *np)
{
struct pmf_device *dev;
unsigned long flags;
DBG("pmf: unregistering driver for node %s\n", np->full_name);
spin_lock_irqsave(&pmf_lock, flags);
dev = pmf_find_device(np);
if (dev == NULL) {
DBG("pmf: not such driver !\n");
spin_unlock_irqrestore(&pmf_lock, flags);
return;
}
list_del(&dev->link);
while(!list_empty(&dev->functions)) {
struct pmf_function *func =
list_entry(dev->functions.next, typeof(*func), link);
list_del(&func->link);
__pmf_put_function(func);
}
pmf_put_device(dev);
spin_unlock_irqrestore(&pmf_lock, flags);
}
EXPORT_SYMBOL_GPL(pmf_unregister_driver);
struct pmf_function *__pmf_find_function(struct device_node *target,
const char *name, u32 flags)
{
struct device_node *actor = of_node_get(target);
struct pmf_device *dev;
struct pmf_function *func, *result = NULL;
char fname[64];
const u32 *prop;
u32 ph;
/*
* Look for a "platform-*" function reference. If we can't find
* one, then we fall back to a direct call attempt
*/
snprintf(fname, 63, "platform-%s", name);
prop = of_get_property(target, fname, NULL);
if (prop == NULL)
goto find_it;
ph = *prop;
if (ph == 0)
goto find_it;
/*
* Ok, now try to find the actor. If we can't find it, we fail,
* there is no point in falling back there
*/
of_node_put(actor);
actor = of_find_node_by_phandle(ph);
if (actor == NULL)
return NULL;
find_it:
dev = pmf_find_device(actor);
if (dev == NULL) {
result = NULL;
goto out;
}
list_for_each_entry(func, &dev->functions, link) {
if (name && strcmp(name, func->name))
continue;
if (func->phandle && target->phandle != func->phandle)
continue;
if ((func->flags & flags) == 0)
continue;
result = func;
break;
}
pmf_put_device(dev);
out:
of_node_put(actor);
return result;
}
int pmf_register_irq_client(struct device_node *target,
const char *name,
struct pmf_irq_client *client)
{
struct pmf_function *func;
unsigned long flags;
spin_lock_irqsave(&pmf_lock, flags);
func = __pmf_find_function(target, name, PMF_FLAGS_INT_GEN);
if (func)
func = pmf_get_function(func);
spin_unlock_irqrestore(&pmf_lock, flags);
if (func == NULL)
return -ENODEV;
/* guard against manipulations of list */
mutex_lock(&pmf_irq_mutex);
if (list_empty(&func->irq_clients))
func->dev->handlers->irq_enable(func);
/* guard against pmf_do_irq while changing list */
spin_lock_irqsave(&pmf_lock, flags);
list_add(&client->link, &func->irq_clients);
spin_unlock_irqrestore(&pmf_lock, flags);
client->func = func;
mutex_unlock(&pmf_irq_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(pmf_register_irq_client);
void pmf_unregister_irq_client(struct pmf_irq_client *client)
{
struct pmf_function *func = client->func;
unsigned long flags;
BUG_ON(func == NULL);
/* guard against manipulations of list */
mutex_lock(&pmf_irq_mutex);
client->func = NULL;
/* guard against pmf_do_irq while changing list */
spin_lock_irqsave(&pmf_lock, flags);
list_del(&client->link);
spin_unlock_irqrestore(&pmf_lock, flags);
if (list_empty(&func->irq_clients))
func->dev->handlers->irq_disable(func);
mutex_unlock(&pmf_irq_mutex);
pmf_put_function(func);
}
EXPORT_SYMBOL_GPL(pmf_unregister_irq_client);
void pmf_do_irq(struct pmf_function *func)
{
unsigned long flags;
struct pmf_irq_client *client;
/* For now, using a spinlock over the whole function. Can be made
* to drop the lock using 2 lists if necessary
*/
spin_lock_irqsave(&pmf_lock, flags);
list_for_each_entry(client, &func->irq_clients, link) {
if (!try_module_get(client->owner))
continue;
client->handler(client->data);
module_put(client->owner);
}
spin_unlock_irqrestore(&pmf_lock, flags);
}
EXPORT_SYMBOL_GPL(pmf_do_irq);
int pmf_call_one(struct pmf_function *func, struct pmf_args *args)
{
struct pmf_device *dev = func->dev;
void *instdata = NULL;
int rc = 0;
DBG(" ** pmf_call_one(%s/%s) **\n", dev->node->full_name, func->name);
if (dev->handlers->begin)
instdata = dev->handlers->begin(func, args);
rc = pmf_parse_one(func, dev->handlers, instdata, args);
if (dev->handlers->end)
dev->handlers->end(func, instdata);
return rc;
}
EXPORT_SYMBOL_GPL(pmf_call_one);
int pmf_do_functions(struct device_node *np, const char *name,
u32 phandle, u32 fflags, struct pmf_args *args)
{
struct pmf_device *dev;
struct pmf_function *func, *tmp;
unsigned long flags;
int rc = -ENODEV;
spin_lock_irqsave(&pmf_lock, flags);
dev = pmf_find_device(np);
if (dev == NULL) {
spin_unlock_irqrestore(&pmf_lock, flags);
return -ENODEV;
}
list_for_each_entry_safe(func, tmp, &dev->functions, link) {
if (name && strcmp(name, func->name))
continue;
if (phandle && func->phandle && phandle != func->phandle)
continue;
if ((func->flags & fflags) == 0)
continue;
if (pmf_get_function(func) == NULL)
continue;
spin_unlock_irqrestore(&pmf_lock, flags);
rc = pmf_call_one(func, args);
pmf_put_function(func);
spin_lock_irqsave(&pmf_lock, flags);
}
pmf_put_device(dev);
spin_unlock_irqrestore(&pmf_lock, flags);
return rc;
}
EXPORT_SYMBOL_GPL(pmf_do_functions);
struct pmf_function *pmf_find_function(struct device_node *target,
const char *name)
{
struct pmf_function *func;
unsigned long flags;
spin_lock_irqsave(&pmf_lock, flags);
func = __pmf_find_function(target, name, PMF_FLAGS_ON_DEMAND);
if (func)
func = pmf_get_function(func);
spin_unlock_irqrestore(&pmf_lock, flags);
return func;
}
EXPORT_SYMBOL_GPL(pmf_find_function);
int pmf_call_function(struct device_node *target, const char *name,
struct pmf_args *args)
{
struct pmf_function *func = pmf_find_function(target, name);
int rc;
if (func == NULL)
return -ENODEV;
rc = pmf_call_one(func, args);
pmf_put_function(func);
return rc;
}
EXPORT_SYMBOL_GPL(pmf_call_function);
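/* illustrative caller (hypothetical node and function name): a driver
* holding a device_node can fire an on-demand platform function with no
* arguments like this:
*
* if (pmf_call_function(np, "cpufreq-pre-change", NULL))
* dev_warn(dev, "platform function failed\n");
*
* pmf_find_function()/pmf_call_one()/pmf_put_function() avoid the
* repeated lookup when the same function is invoked often. */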
| gpl-2.0 |
WarrickJiang/linux-stable | arch/mips/mm/c-r3k.c | 1922 | 8111 | /*
* r2300.c: R2000 and R3000 specific mmu/cache code.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
*
* with a lot of changes to make this thing work for R3000s
* Tx39XX R4k style caches added. HK
* Copyright (C) 1998, 1999, 2000 Harald Koerfgen
* Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
* Copyright (C) 2001, 2004, 2007 Maciej W. Rozycki
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/isadep.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
static unsigned long icache_size, dcache_size; /* Size in bytes */
static unsigned long icache_lsize, dcache_lsize; /* Line size in bytes */
unsigned long r3k_cache_size(unsigned long ca_flags)
{
unsigned long flags, status, dummy, size;
volatile unsigned long *p;
p = (volatile unsigned long *) KSEG0;
flags = read_c0_status();
/* isolate cache space */
write_c0_status((ca_flags|flags)&~ST0_IEC);
*p = 0xa5a55a5a;
dummy = *p;
status = read_c0_status();
if (dummy != 0xa5a55a5a || (status & ST0_CM)) {
size = 0;
} else {
for (size = 128; size <= 0x40000; size <<= 1)
*(p + size) = 0;
*p = -1;
for (size = 128;
(size <= 0x40000) && (*(p + size) == 0);
size <<= 1)
;
if (size > 0x40000)
size = 0;
}
write_c0_status(flags);
return size * sizeof(*p);
}
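/* probe sketch (restating the loop above): with the cache isolated,
* stores never reach memory, so p + size aliases p exactly when size
* (in words) is a multiple of the cache size. After zeroing each
* power-of-two offset and then storing -1 at *p, the scan stops at the
* first offset that no longer reads back 0 -- that one wrapped around
* onto *p, so it is the cache size in words. */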
unsigned long r3k_cache_lsize(unsigned long ca_flags)
{
unsigned long flags, status, lsize, i;
volatile unsigned long *p;
p = (volatile unsigned long *) KSEG0;
flags = read_c0_status();
/* isolate cache space */
write_c0_status((ca_flags|flags)&~ST0_IEC);
for (i = 0; i < 128; i++)
*(p + i) = 0;
*(volatile unsigned char *)p = 0;
for (lsize = 1; lsize < 128; lsize <<= 1) {
*(p + lsize);
status = read_c0_status();
if (!(status & ST0_CM))
break;
}
for (i = 0; i < 128; i += lsize)
*(volatile unsigned char *)(p + i) = 0;
write_c0_status(flags);
return lsize * sizeof(*p);
}
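/* probe sketch (illustrative restatement): on the R3000, a partial-word
* store with the cache isolated invalidates the line containing *p, so
* reads at doubling offsets miss (ST0_CM set) while they still land in
* that invalidated line; the first offset that hits has escaped the
* line, so lsize at that point is the line size in words. */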
static void r3k_probe_cache(void)
{
dcache_size = r3k_cache_size(ST0_ISC);
if (dcache_size)
dcache_lsize = r3k_cache_lsize(ST0_ISC);
icache_size = r3k_cache_size(ST0_ISC|ST0_SWC);
if (icache_size)
icache_lsize = r3k_cache_lsize(ST0_ISC|ST0_SWC);
}
static void r3k_flush_icache_range(unsigned long start, unsigned long end)
{
unsigned long size, i, flags;
volatile unsigned char *p;
size = end - start;
if (size > icache_size || KSEGX(start) != KSEG0) {
start = KSEG0;
size = icache_size;
}
p = (char *)start;
flags = read_c0_status();
/* isolate cache space */
write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
for (i = 0; i < size; i += 0x080) {
asm( "sb\t$0, 0x000(%0)\n\t"
"sb\t$0, 0x004(%0)\n\t"
"sb\t$0, 0x008(%0)\n\t"
"sb\t$0, 0x00c(%0)\n\t"
"sb\t$0, 0x010(%0)\n\t"
"sb\t$0, 0x014(%0)\n\t"
"sb\t$0, 0x018(%0)\n\t"
"sb\t$0, 0x01c(%0)\n\t"
"sb\t$0, 0x020(%0)\n\t"
"sb\t$0, 0x024(%0)\n\t"
"sb\t$0, 0x028(%0)\n\t"
"sb\t$0, 0x02c(%0)\n\t"
"sb\t$0, 0x030(%0)\n\t"
"sb\t$0, 0x034(%0)\n\t"
"sb\t$0, 0x038(%0)\n\t"
"sb\t$0, 0x03c(%0)\n\t"
"sb\t$0, 0x040(%0)\n\t"
"sb\t$0, 0x044(%0)\n\t"
"sb\t$0, 0x048(%0)\n\t"
"sb\t$0, 0x04c(%0)\n\t"
"sb\t$0, 0x050(%0)\n\t"
"sb\t$0, 0x054(%0)\n\t"
"sb\t$0, 0x058(%0)\n\t"
"sb\t$0, 0x05c(%0)\n\t"
"sb\t$0, 0x060(%0)\n\t"
"sb\t$0, 0x064(%0)\n\t"
"sb\t$0, 0x068(%0)\n\t"
"sb\t$0, 0x06c(%0)\n\t"
"sb\t$0, 0x070(%0)\n\t"
"sb\t$0, 0x074(%0)\n\t"
"sb\t$0, 0x078(%0)\n\t"
"sb\t$0, 0x07c(%0)\n\t"
: : "r" (p) );
p += 0x080;
}
write_c0_status(flags);
}
static void r3k_flush_dcache_range(unsigned long start, unsigned long end)
{
unsigned long size, i, flags;
volatile unsigned char *p;
size = end - start;
if (size > dcache_size || KSEGX(start) != KSEG0) {
start = KSEG0;
size = dcache_size;
}
p = (char *)start;
flags = read_c0_status();
/* isolate cache space */
write_c0_status((ST0_ISC|flags)&~ST0_IEC);
for (i = 0; i < size; i += 0x080) {
asm( "sb\t$0, 0x000(%0)\n\t"
"sb\t$0, 0x004(%0)\n\t"
"sb\t$0, 0x008(%0)\n\t"
"sb\t$0, 0x00c(%0)\n\t"
"sb\t$0, 0x010(%0)\n\t"
"sb\t$0, 0x014(%0)\n\t"
"sb\t$0, 0x018(%0)\n\t"
"sb\t$0, 0x01c(%0)\n\t"
"sb\t$0, 0x020(%0)\n\t"
"sb\t$0, 0x024(%0)\n\t"
"sb\t$0, 0x028(%0)\n\t"
"sb\t$0, 0x02c(%0)\n\t"
"sb\t$0, 0x030(%0)\n\t"
"sb\t$0, 0x034(%0)\n\t"
"sb\t$0, 0x038(%0)\n\t"
"sb\t$0, 0x03c(%0)\n\t"
"sb\t$0, 0x040(%0)\n\t"
"sb\t$0, 0x044(%0)\n\t"
"sb\t$0, 0x048(%0)\n\t"
"sb\t$0, 0x04c(%0)\n\t"
"sb\t$0, 0x050(%0)\n\t"
"sb\t$0, 0x054(%0)\n\t"
"sb\t$0, 0x058(%0)\n\t"
"sb\t$0, 0x05c(%0)\n\t"
"sb\t$0, 0x060(%0)\n\t"
"sb\t$0, 0x064(%0)\n\t"
"sb\t$0, 0x068(%0)\n\t"
"sb\t$0, 0x06c(%0)\n\t"
"sb\t$0, 0x070(%0)\n\t"
"sb\t$0, 0x074(%0)\n\t"
"sb\t$0, 0x078(%0)\n\t"
"sb\t$0, 0x07c(%0)\n\t"
: : "r" (p) );
p += 0x080;
}
write_c0_status(flags);
}
static inline void r3k_flush_cache_all(void)
{
}
static inline void r3k___flush_cache_all(void)
{
r3k_flush_dcache_range(KSEG0, KSEG0 + dcache_size);
r3k_flush_icache_range(KSEG0, KSEG0 + icache_size);
}
static void r3k_flush_cache_mm(struct mm_struct *mm)
{
}
static void r3k_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
}
static void r3k_flush_cache_page(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn)
{
unsigned long kaddr = KSEG0ADDR(pfn << PAGE_SHIFT);
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgdp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
pr_debug("cpage[%08lx,%08lx]\n",
cpu_context(smp_processor_id(), mm), addr);
/* No ASID => no such page in the cache. */
if (cpu_context(smp_processor_id(), mm) == 0)
return;
pgdp = pgd_offset(mm, addr);
pudp = pud_offset(pgdp, addr);
pmdp = pmd_offset(pudp, addr);
ptep = pte_offset(pmdp, addr);
/* Invalid => no such page in the cache. */
if (!(pte_val(*ptep) & _PAGE_PRESENT))
return;
r3k_flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
if (exec)
r3k_flush_icache_range(kaddr, kaddr + PAGE_SIZE);
}
static void local_r3k_flush_data_cache_page(void *addr)
{
}
static void r3k_flush_data_cache_page(unsigned long addr)
{
}
static void r3k_flush_cache_sigtramp(unsigned long addr)
{
unsigned long flags;
pr_debug("csigtramp[%08lx]\n", addr);
flags = read_c0_status();
write_c0_status(flags&~ST0_IEC);
/* Fill the TLB to avoid an exception with caches isolated. */
asm( "lw\t$0, 0x000(%0)\n\t"
"lw\t$0, 0x004(%0)\n\t"
: : "r" (addr) );
write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
asm( "sb\t$0, 0x000(%0)\n\t"
"sb\t$0, 0x004(%0)\n\t"
: : "r" (addr) );
write_c0_status(flags);
}
static void r3k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
BUG();
}
static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
{
/* Catch bad driver code */
BUG_ON(size == 0);
iob();
r3k_flush_dcache_range(start, start + size);
}
void r3k_cache_init(void)
{
extern void build_clear_page(void);
extern void build_copy_page(void);
r3k_probe_cache();
flush_cache_all = r3k_flush_cache_all;
__flush_cache_all = r3k___flush_cache_all;
flush_cache_mm = r3k_flush_cache_mm;
flush_cache_range = r3k_flush_cache_range;
flush_cache_page = r3k_flush_cache_page;
flush_icache_range = r3k_flush_icache_range;
local_flush_icache_range = r3k_flush_icache_range;
__flush_kernel_vmap_range = r3k_flush_kernel_vmap_range;
flush_cache_sigtramp = r3k_flush_cache_sigtramp;
local_flush_data_cache_page = local_r3k_flush_data_cache_page;
flush_data_cache_page = r3k_flush_data_cache_page;
_dma_cache_wback_inv = r3k_dma_cache_wback_inv;
_dma_cache_wback = r3k_dma_cache_wback_inv;
_dma_cache_inv = r3k_dma_cache_wback_inv;
printk("Primary instruction cache %ldkB, linesize %ld bytes.\n",
icache_size >> 10, icache_lsize);
printk("Primary data cache %ldkB, linesize %ld bytes.\n",
dcache_size >> 10, dcache_lsize);
build_clear_page();
build_copy_page();
}
| gpl-2.0 |
MoKee/android_kernel_mediatek_sprout | arch/mips/kernel/cevt-gic.c | 1922 | 2412 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2013 Imagination Technologies Ltd.
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>
#include <asm/time.h>
#include <asm/gic.h>
#include <asm/mips-boards/maltaint.h>
DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
int gic_timer_irq_installed;
static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
{
u64 cnt;
int res;
cnt = gic_read_count();
cnt += (u64)delta;
gic_write_compare(cnt);
res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
return res;
}
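/* illustrative check (restating the return above): after programming
* compare = count + delta, (int)(gic_read_count() - cnt) >= 0 means the
* counter already passed the compare point -- the delta was too short to
* program in time -- so -ETIME tells the clockevents core to retry with
* a larger delta. */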
void gic_set_clock_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
/* Nothing to do ... */
}
irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
{
struct clock_event_device *cd;
int cpu = smp_processor_id();
gic_write_compare(gic_read_compare());
cd = &per_cpu(gic_clockevent_device, cpu);
cd->event_handler(cd);
return IRQ_HANDLED;
}
struct irqaction gic_compare_irqaction = {
.handler = gic_compare_interrupt,
.flags = IRQF_PERCPU | IRQF_TIMER,
.name = "timer",
};
void gic_event_handler(struct clock_event_device *dev)
{
}
int __cpuinit gic_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd;
unsigned int irq;
if (!cpu_has_counter || !gic_frequency)
return -ENXIO;
irq = MIPS_GIC_IRQ_BASE;
cd = &per_cpu(gic_clockevent_device, cpu);
cd->name = "MIPS GIC";
cd->features = CLOCK_EVT_FEAT_ONESHOT;
clockevent_set_clock(cd, gic_frequency);
/* Calculate the min / max delta */
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
cd->rating = 300;
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = gic_next_event;
cd->set_mode = gic_set_clock_mode;
cd->event_handler = gic_event_handler;
clockevents_register_device(cd);
GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_MAP), 0x80000002);
GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), GIC_VPE_SMASK_CMP_MSK);
if (gic_timer_irq_installed)
return 0;
gic_timer_irq_installed = 1;
setup_irq(irq, &gic_compare_irqaction);
irq_set_handler(irq, handle_percpu_irq);
return 0;
}
| gpl-2.0 |
spiderworthy/linux | drivers/hwmon/ad7418.c | 1922 | 7227 | /*
* An hwmon driver for the Analog Devices AD7416/17/18
* Copyright (C) 2006-07 Tower Technologies
*
* Author: Alessandro Zummo <a.zummo@towertech.it>
*
* Based on lm75.c
* Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License,
* as published by the Free Software Foundation - version 2.
*/
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "lm75.h"
#define DRV_VERSION "0.4"
enum chips { ad7416, ad7417, ad7418 };
/* AD7418 registers */
#define AD7418_REG_TEMP_IN 0x00
#define AD7418_REG_CONF 0x01
#define AD7418_REG_TEMP_HYST 0x02
#define AD7418_REG_TEMP_OS 0x03
#define AD7418_REG_ADC 0x04
#define AD7418_REG_CONF2 0x05
#define AD7418_REG_ADC_CH(x) ((x) << 5)
#define AD7418_CH_TEMP AD7418_REG_ADC_CH(0)
static const u8 AD7418_REG_TEMP[] = { AD7418_REG_TEMP_IN,
AD7418_REG_TEMP_HYST,
AD7418_REG_TEMP_OS };
struct ad7418_data {
struct i2c_client *client;
enum chips type;
struct mutex lock;
int adc_max; /* number of ADC channels */
char valid;
unsigned long last_updated; /* In jiffies */
s16 temp[3]; /* Register values */
u16 in[4];
};
static struct ad7418_data *ad7418_update_device(struct device *dev)
{
struct ad7418_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
mutex_lock(&data->lock);
if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
|| !data->valid) {
u8 cfg;
int i, ch;
/* read config register and clear channel bits */
cfg = i2c_smbus_read_byte_data(client, AD7418_REG_CONF);
cfg &= 0x1F;
i2c_smbus_write_byte_data(client, AD7418_REG_CONF,
cfg | AD7418_CH_TEMP);
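/* give the ADC time to settle on the temperature channel */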
udelay(30);
for (i = 0; i < 3; i++) {
data->temp[i] =
i2c_smbus_read_word_swapped(client,
AD7418_REG_TEMP[i]);
}
for (i = 0, ch = 4; i < data->adc_max; i++, ch--) {
i2c_smbus_write_byte_data(client,
AD7418_REG_CONF,
cfg | AD7418_REG_ADC_CH(ch));
udelay(15);
data->in[data->adc_max - 1 - i] =
i2c_smbus_read_word_swapped(client,
AD7418_REG_ADC);
}
/* restore old configuration value */
i2c_smbus_write_word_swapped(client, AD7418_REG_CONF, cfg);
data->last_updated = jiffies;
data->valid = 1;
}
mutex_unlock(&data->lock);
return data;
}
static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct ad7418_data *data = ad7418_update_device(dev);
return sprintf(buf, "%d\n",
LM75_TEMP_FROM_REG(data->temp[attr->index]));
}
static ssize_t show_adc(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct ad7418_data *data = ad7418_update_device(dev);
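/* The 10-bit conversion result sits in bits 15:6; scale it to mV
* against the 2.5 V reference, with +512 providing rounding. */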
return sprintf(buf, "%d\n",
((data->in[attr->index] >> 6) * 2500 + 512) / 1024);
}
static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct ad7418_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
long temp;
int ret = kstrtol(buf, 10, &temp);
if (ret < 0)
return ret;
mutex_lock(&data->lock);
data->temp[attr->index] = LM75_TEMP_TO_REG(temp);
i2c_smbus_write_word_swapped(client,
AD7418_REG_TEMP[attr->index],
data->temp[attr->index]);
mutex_unlock(&data->lock);
return count;
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO,
show_temp, set_temp, 1);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
show_temp, set_temp, 2);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_adc, NULL, 0);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_adc, NULL, 1);
static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_adc, NULL, 2);
static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_adc, NULL, 3);
static struct attribute *ad7416_attrs[] = {
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
NULL
};
ATTRIBUTE_GROUPS(ad7416);
static struct attribute *ad7417_attrs[] = {
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_in3_input.dev_attr.attr,
&sensor_dev_attr_in4_input.dev_attr.attr,
NULL
};
ATTRIBUTE_GROUPS(ad7417);
static struct attribute *ad7418_attrs[] = {
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
NULL
};
ATTRIBUTE_GROUPS(ad7418);
static void ad7418_init_client(struct i2c_client *client)
{
struct ad7418_data *data = i2c_get_clientdata(client);
int reg = i2c_smbus_read_byte_data(client, AD7418_REG_CONF);
if (reg < 0) {
dev_err(&client->dev, "cannot read configuration register\n");
} else {
dev_info(&client->dev, "configuring for mode 1\n");
i2c_smbus_write_byte_data(client, AD7418_REG_CONF, reg & 0xfe);
if (data->type == ad7417 || data->type == ad7418)
i2c_smbus_write_byte_data(client,
AD7418_REG_CONF2, 0x00);
}
}
static int ad7418_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct i2c_adapter *adapter = client->adapter;
struct ad7418_data *data;
struct device *hwmon_dev;
const struct attribute_group **attr_groups = NULL;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WORD_DATA))
return -EOPNOTSUPP;
data = devm_kzalloc(dev, sizeof(struct ad7418_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->lock);
data->client = client;
data->type = id->driver_data;
switch (data->type) {
case ad7416:
data->adc_max = 0;
attr_groups = ad7416_groups;
break;
case ad7417:
data->adc_max = 4;
attr_groups = ad7417_groups;
break;
case ad7418:
data->adc_max = 1;
attr_groups = ad7418_groups;
break;
}
dev_info(dev, "%s chip found\n", client->name);
/* Initialize the AD7418 chip */
ad7418_init_client(client);
hwmon_dev = devm_hwmon_device_register_with_groups(dev,
client->name,
data, attr_groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id ad7418_id[] = {
{ "ad7416", ad7416 },
{ "ad7417", ad7417 },
{ "ad7418", ad7418 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ad7418_id);
static struct i2c_driver ad7418_driver = {
.driver = {
.name = "ad7418",
},
.probe = ad7418_probe,
.id_table = ad7418_id,
};
module_i2c_driver(ad7418_driver);
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("AD7416/17/18 driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
allan888/Linux_anti_malware_file_system | fs/squashfs/zlib_wrapper.c | 2178 | 3150 | /*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
* Phillip Lougher <phillip@squashfs.org.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2,
* or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* zlib_wrapper.c
*/
#include <linux/mutex.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/zlib.h>
#include <linux/vmalloc.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
#include "page_actor.h"
static void *zlib_init(struct squashfs_sb_info *dummy, void *buff)
{
z_stream *stream = kmalloc(sizeof(z_stream), GFP_KERNEL);
if (stream == NULL)
goto failed;
stream->workspace = vmalloc(zlib_inflate_workspacesize());
if (stream->workspace == NULL)
goto failed;
return stream;
failed:
ERROR("Failed to allocate zlib workspace\n");
kfree(stream);
return ERR_PTR(-ENOMEM);
}
static void zlib_free(void *strm)
{
z_stream *stream = strm;
if (stream)
vfree(stream->workspace);
kfree(stream);
}
static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
struct buffer_head **bh, int b, int offset, int length,
struct squashfs_page_actor *output)
{
int zlib_err, zlib_init = 0, k = 0;
z_stream *stream = strm;
stream->avail_out = PAGE_CACHE_SIZE;
stream->next_out = squashfs_first_page(output);
stream->avail_in = 0;
do {
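/* Refill the input from the next buffer_head once the current
* one has been fully consumed. */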
if (stream->avail_in == 0 && k < b) {
int avail = min(length, msblk->devblksize - offset);
length -= avail;
stream->next_in = bh[k]->b_data + offset;
stream->avail_in = avail;
offset = 0;
}
if (stream->avail_out == 0) {
stream->next_out = squashfs_next_page(output);
if (stream->next_out != NULL)
stream->avail_out = PAGE_CACHE_SIZE;
}
if (!zlib_init) {
zlib_err = zlib_inflateInit(stream);
if (zlib_err != Z_OK) {
squashfs_finish_page(output);
goto out;
}
zlib_init = 1;
}
zlib_err = zlib_inflate(stream, Z_SYNC_FLUSH);
if (stream->avail_in == 0 && k < b)
put_bh(bh[k++]);
} while (zlib_err == Z_OK);
squashfs_finish_page(output);
if (zlib_err != Z_STREAM_END)
goto out;
zlib_err = zlib_inflateEnd(stream);
if (zlib_err != Z_OK)
goto out;
if (k < b)
goto out;
return stream->total_out;
out:
for (; k < b; k++)
put_bh(bh[k]);
return -EIO;
}
const struct squashfs_decompressor squashfs_zlib_comp_ops = {
.init = zlib_init,
.free = zlib_free,
.decompress = zlib_uncompress,
.id = ZLIB_COMPRESSION,
.name = "zlib",
.supported = 1
};
| gpl-2.0 |
MoKee/android_kernel_zte_nx511j | drivers/net/wireless/b43legacy/dma.c | 2178 | 37603 | /*
Broadcom B43legacy wireless driver
DMA ringbuffer and descriptor allocation/management
Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>
Some code in this file is derived from the b44.c driver
Copyright (C) 2002 David S. Miller
Copyright (C) Pekka Pietikainen
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
#include "b43legacy.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/dst.h>
/* 32bit DMA ops. */
static
struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
int slot,
struct b43legacy_dmadesc_meta **meta)
{
struct b43legacy_dmadesc32 *desc;
*meta = &(ring->meta[slot]);
desc = ring->descbase;
desc = &(desc[slot]);
return desc;
}
static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
struct b43legacy_dmadesc32 *desc,
dma_addr_t dmaaddr, u16 bufsize,
int start, int end, int irq)
{
struct b43legacy_dmadesc32 *descbase = ring->descbase;
int slot;
u32 ctl;
u32 addr;
u32 addrext;
slot = (int)(desc - descbase);
B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
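/* Split the bus address: the low bits go into the descriptor as-is,
* while the SSB translation bits are carried in the address-extension
* field of the control word. */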
addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
>> SSB_DMA_TRANSLATION_SHIFT;
addr |= ring->dev->dma.translation;
ctl = (bufsize - ring->frameoffset)
& B43legacy_DMA32_DCTL_BYTECNT;
if (slot == ring->nr_slots - 1)
ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
if (start)
ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
if (end)
ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
if (irq)
ctl |= B43legacy_DMA32_DCTL_IRQ;
ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
& B43legacy_DMA32_DCTL_ADDREXT_MASK;
desc->control = cpu_to_le32(ctl);
desc->address = cpu_to_le32(addr);
}
static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
(u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}
static void op32_tx_suspend(struct b43legacy_dmaring *ring)
{
b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
| B43legacy_DMA32_TXSUSPEND);
}
static void op32_tx_resume(struct b43legacy_dmaring *ring)
{
b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
& ~B43legacy_DMA32_TXSUSPEND);
}
static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
{
u32 val;
val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
val &= B43legacy_DMA32_RXDPTR;
return (val / sizeof(struct b43legacy_dmadesc32));
}
static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
int slot)
{
b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
(u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}
static inline int free_slots(struct b43legacy_dmaring *ring)
{
return (ring->nr_slots - ring->used_slots);
}
static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
{
B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
if (slot == ring->nr_slots - 1)
return 0;
return slot + 1;
}
static inline int prev_slot(struct b43legacy_dmaring *ring, int slot)
{
B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
if (slot == 0)
return ring->nr_slots - 1;
return slot - 1;
}
#ifdef CONFIG_B43LEGACY_DEBUG
static void update_max_used_slots(struct b43legacy_dmaring *ring,
int current_used_slots)
{
if (current_used_slots <= ring->max_used_slots)
return;
ring->max_used_slots = current_used_slots;
if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
b43legacydbg(ring->dev->wl,
"max_used_slots increased to %d on %s ring %d\n",
ring->max_used_slots,
ring->tx ? "TX" : "RX",
ring->index);
}
#else
static inline
void update_max_used_slots(struct b43legacy_dmaring *ring,
int current_used_slots)
{ }
#endif /* DEBUG */
/* Request a slot for usage. */
static inline
int request_slot(struct b43legacy_dmaring *ring)
{
int slot;
B43legacy_WARN_ON(!ring->tx);
B43legacy_WARN_ON(ring->stopped);
B43legacy_WARN_ON(free_slots(ring) == 0);
slot = next_slot(ring, ring->current_slot);
ring->current_slot = slot;
ring->used_slots++;
update_max_used_slots(ring, ring->used_slots);
return slot;
}
/* Mac80211-queue to b43legacy-ring mapping */
static struct b43legacy_dmaring *priority_to_txring(
struct b43legacy_wldev *dev,
int queue_priority)
{
struct b43legacy_dmaring *ring;
/* FIXME: for now we always run on TX-ring-1 */
return dev->dma.tx_ring1;
/* 0 = highest priority */
switch (queue_priority) {
default:
B43legacy_WARN_ON(1);
/* fallthrough */
case 0:
ring = dev->dma.tx_ring3;
break;
case 1:
ring = dev->dma.tx_ring2;
break;
case 2:
ring = dev->dma.tx_ring1;
break;
case 3:
ring = dev->dma.tx_ring0;
break;
case 4:
ring = dev->dma.tx_ring4;
break;
case 5:
ring = dev->dma.tx_ring5;
break;
}
return ring;
}
/* Bcm4301-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43legacy_dmaring *ring)
{
static const u8 idx_to_prio[] =
{ 3, 2, 1, 0, 4, 5, };
/* FIXME: we have only one queue, for now */
return 0;
return idx_to_prio[ring->index];
}
static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
int controller_idx)
{
static const u16 map32[] = {
B43legacy_MMIO_DMA32_BASE0,
B43legacy_MMIO_DMA32_BASE1,
B43legacy_MMIO_DMA32_BASE2,
B43legacy_MMIO_DMA32_BASE3,
B43legacy_MMIO_DMA32_BASE4,
B43legacy_MMIO_DMA32_BASE5,
};
B43legacy_WARN_ON(!(controller_idx >= 0 &&
controller_idx < ARRAY_SIZE(map32)));
return map32[controller_idx];
}
static inline
dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
unsigned char *buf,
size_t len,
int tx)
{
dma_addr_t dmaaddr;
if (tx)
dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
buf, len,
DMA_TO_DEVICE);
else
dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
buf, len,
DMA_FROM_DEVICE);
return dmaaddr;
}
static inline
void unmap_descbuffer(struct b43legacy_dmaring *ring,
dma_addr_t addr,
size_t len,
int tx)
{
if (tx)
dma_unmap_single(ring->dev->dev->dma_dev,
addr, len,
DMA_TO_DEVICE);
else
dma_unmap_single(ring->dev->dev->dma_dev,
addr, len,
DMA_FROM_DEVICE);
}
static inline
void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
dma_addr_t addr,
size_t len)
{
B43legacy_WARN_ON(ring->tx);
dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
addr, len, DMA_FROM_DEVICE);
}
static inline
void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
dma_addr_t addr,
size_t len)
{
B43legacy_WARN_ON(ring->tx);
dma_sync_single_for_device(ring->dev->dev->dma_dev,
addr, len, DMA_FROM_DEVICE);
}
static inline
void free_descriptor_buffer(struct b43legacy_dmaring *ring,
struct b43legacy_dmadesc_meta *meta,
int irq_context)
{
if (meta->skb) {
if (irq_context)
dev_kfree_skb_irq(meta->skb);
else
dev_kfree_skb(meta->skb);
meta->skb = NULL;
}
}
static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
/* GFP flags must match the flags in free_ringmemory()! */
ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
B43legacy_DMA_RINGMEMSIZE,
&(ring->dmabase),
GFP_KERNEL | __GFP_ZERO);
if (!ring->descbase)
return -ENOMEM;
return 0;
}
static void free_ringmemory(struct b43legacy_dmaring *ring)
{
dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
ring->descbase, ring->dmabase);
}
/* Reset the RX DMA channel */
static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
u16 mmio_base,
enum b43legacy_dmatype type)
{
int i;
u32 value;
u16 offset;
might_sleep();
offset = B43legacy_DMA32_RXCTL;
b43legacy_write32(dev, mmio_base + offset, 0);
for (i = 0; i < 10; i++) {
offset = B43legacy_DMA32_RXSTATUS;
value = b43legacy_read32(dev, mmio_base + offset);
value &= B43legacy_DMA32_RXSTATE;
if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
i = -1;
break;
}
msleep(1);
}
if (i != -1) {
b43legacyerr(dev->wl, "DMA RX reset timed out\n");
return -ENODEV;
}
return 0;
}
/* Reset the TX DMA channel */
static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
u16 mmio_base,
enum b43legacy_dmatype type)
{
int i;
u32 value;
u16 offset;
might_sleep();
for (i = 0; i < 10; i++) {
offset = B43legacy_DMA32_TXSTATUS;
value = b43legacy_read32(dev, mmio_base + offset);
value &= B43legacy_DMA32_TXSTATE;
if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
value == B43legacy_DMA32_TXSTAT_STOPPED)
break;
msleep(1);
}
offset = B43legacy_DMA32_TXCTL;
b43legacy_write32(dev, mmio_base + offset, 0);
for (i = 0; i < 10; i++) {
offset = B43legacy_DMA32_TXSTATUS;
value = b43legacy_read32(dev, mmio_base + offset);
value &= B43legacy_DMA32_TXSTATE;
if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
i = -1;
break;
}
msleep(1);
}
if (i != -1) {
b43legacyerr(dev->wl, "DMA TX reset timed out\n");
return -ENODEV;
}
/* ensure the reset is completed. */
msleep(1);
return 0;
}
/* Check if a DMA mapping address is invalid. */
static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
dma_addr_t addr,
size_t buffersize,
bool dma_to_device)
{
if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
return 1;
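/* Even a successful mapping is unusable if it lies outside the
* address range the DMA engine can reach, so check it against the
* engine's 30- or 32-bit limit. */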
switch (ring->type) {
case B43legacy_DMA_30BIT:
if ((u64)addr + buffersize > (1ULL << 30))
goto address_error;
break;
case B43legacy_DMA_32BIT:
if ((u64)addr + buffersize > (1ULL << 32))
goto address_error;
break;
}
/* The address is OK. */
return 0;
address_error:
/* We can't support this address. Unmap it again. */
unmap_descbuffer(ring, addr, buffersize, dma_to_device);
return 1;
}
static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
struct b43legacy_dmadesc32 *desc,
struct b43legacy_dmadesc_meta *meta,
gfp_t gfp_flags)
{
struct b43legacy_rxhdr_fw3 *rxhdr;
struct b43legacy_hwtxstatus *txstat;
dma_addr_t dmaaddr;
struct sk_buff *skb;
B43legacy_WARN_ON(ring->tx);
skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
if (unlikely(!skb))
return -ENOMEM;
dmaaddr = map_descbuffer(ring, skb->data,
ring->rx_buffersize, 0);
if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
/* ugh. try to realloc in zone_dma */
gfp_flags |= GFP_DMA;
dev_kfree_skb_any(skb);
skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
if (unlikely(!skb))
return -ENOMEM;
dmaaddr = map_descbuffer(ring, skb->data,
ring->rx_buffersize, 0);
}
if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
dev_kfree_skb_any(skb);
return -EIO;
}
meta->skb = skb;
meta->dmaaddr = dmaaddr;
op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0);
rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
rxhdr->frame_len = 0;
txstat = (struct b43legacy_hwtxstatus *)(skb->data);
txstat->cookie = 0;
return 0;
}
/* Allocate the initial descbuffers.
* This is used for an RX ring only.
*/
static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
{
int i;
int err = -ENOMEM;
struct b43legacy_dmadesc32 *desc;
struct b43legacy_dmadesc_meta *meta;
for (i = 0; i < ring->nr_slots; i++) {
desc = op32_idx2desc(ring, i, &meta);
err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
if (err) {
b43legacyerr(ring->dev->wl,
"Failed to allocate initial descbuffers\n");
goto err_unwind;
}
}
mb(); /* all descbuffer setup before next line */
ring->used_slots = ring->nr_slots;
err = 0;
out:
return err;
err_unwind:
for (i--; i >= 0; i--) {
desc = op32_idx2desc(ring, i, &meta);
unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
dev_kfree_skb(meta->skb);
}
goto out;
}
/* Do initial setup of the DMA controller.
* Reset the controller, write the ring busaddress
* and switch the "enable" bit on.
*/
static int dmacontroller_setup(struct b43legacy_dmaring *ring)
{
int err = 0;
u32 value;
u32 addrext;
u32 trans = ring->dev->dma.translation;
u32 ringbase = (u32)(ring->dmabase);
if (ring->tx) {
addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
>> SSB_DMA_TRANSLATION_SHIFT;
value = B43legacy_DMA32_TXENABLE;
value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
& B43legacy_DMA32_TXADDREXT_MASK;
b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
(ringbase & ~SSB_DMA_TRANSLATION_MASK)
| trans);
} else {
err = alloc_initial_descbuffers(ring);
if (err)
goto out;
addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
>> SSB_DMA_TRANSLATION_SHIFT;
value = (ring->frameoffset <<
B43legacy_DMA32_RXFROFF_SHIFT);
value |= B43legacy_DMA32_RXENABLE;
value |= (addrext << B43legacy_DMA32_RXADDREXT_SHIFT)
& B43legacy_DMA32_RXADDREXT_MASK;
b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
(ringbase & ~SSB_DMA_TRANSLATION_MASK)
| trans);
b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200);
}
out:
return err;
}
/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
{
if (ring->tx) {
b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
ring->type);
b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
} else {
b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
ring->type);
b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
}
}
static void free_all_descbuffers(struct b43legacy_dmaring *ring)
{
struct b43legacy_dmadesc_meta *meta;
int i;
if (!ring->used_slots)
return;
for (i = 0; i < ring->nr_slots; i++) {
op32_idx2desc(ring, i, &meta);
if (!meta->skb) {
B43legacy_WARN_ON(!ring->tx);
continue;
}
if (ring->tx)
unmap_descbuffer(ring, meta->dmaaddr,
meta->skb->len, 1);
else
unmap_descbuffer(ring, meta->dmaaddr,
ring->rx_buffersize, 0);
free_descriptor_buffer(ring, meta, 0);
}
}
static u64 supported_dma_mask(struct b43legacy_wldev *dev)
{
u32 tmp;
u16 mmio_base;
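/* Probe the engine width: set the address-extension bits in TXCTL
* and read them back; if they stick, the core can do 32-bit DMA,
* otherwise it is limited to 30 bits. */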
mmio_base = b43legacy_dmacontroller_base(0, 0);
b43legacy_write32(dev,
mmio_base + B43legacy_DMA32_TXCTL,
B43legacy_DMA32_TXADDREXT_MASK);
tmp = b43legacy_read32(dev, mmio_base +
B43legacy_DMA32_TXCTL);
if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
return DMA_BIT_MASK(32);
return DMA_BIT_MASK(30);
}
static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
{
if (dmamask == DMA_BIT_MASK(30))
return B43legacy_DMA_30BIT;
if (dmamask == DMA_BIT_MASK(32))
return B43legacy_DMA_32BIT;
B43legacy_WARN_ON(1);
return B43legacy_DMA_30BIT;
}
/* Main initialization function. */
static
struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
int controller_index,
int for_tx,
enum b43legacy_dmatype type)
{
struct b43legacy_dmaring *ring;
int err;
int nr_slots;
dma_addr_t dma_test;
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
goto out;
ring->type = type;
ring->dev = dev;
nr_slots = B43legacy_RXRING_SLOTS;
if (for_tx)
nr_slots = B43legacy_TXRING_SLOTS;
ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
GFP_KERNEL);
if (!ring->meta)
goto err_kfree_ring;
if (for_tx) {
ring->txhdr_cache = kcalloc(nr_slots,
sizeof(struct b43legacy_txhdr_fw3),
GFP_KERNEL);
if (!ring->txhdr_cache)
goto err_kfree_meta;
/* test for ability to dma to txhdr_cache */
dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
sizeof(struct b43legacy_txhdr_fw3),
DMA_TO_DEVICE);
if (b43legacy_dma_mapping_error(ring, dma_test,
sizeof(struct b43legacy_txhdr_fw3), 1)) {
/* ugh realloc */
kfree(ring->txhdr_cache);
ring->txhdr_cache = kcalloc(nr_slots,
sizeof(struct b43legacy_txhdr_fw3),
GFP_KERNEL | GFP_DMA);
if (!ring->txhdr_cache)
goto err_kfree_meta;
dma_test = dma_map_single(dev->dev->dma_dev,
ring->txhdr_cache,
sizeof(struct b43legacy_txhdr_fw3),
DMA_TO_DEVICE);
if (b43legacy_dma_mapping_error(ring, dma_test,
sizeof(struct b43legacy_txhdr_fw3), 1))
goto err_kfree_txhdr_cache;
}
dma_unmap_single(dev->dev->dma_dev, dma_test,
sizeof(struct b43legacy_txhdr_fw3),
DMA_TO_DEVICE);
}
ring->nr_slots = nr_slots;
ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
ring->index = controller_index;
if (for_tx) {
ring->tx = true;
ring->current_slot = -1;
} else {
if (ring->index == 0) {
ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
} else if (ring->index == 3) {
ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
} else
B43legacy_WARN_ON(1);
}
#ifdef CONFIG_B43LEGACY_DEBUG
ring->last_injected_overflow = jiffies;
#endif
err = alloc_ringmemory(ring);
if (err)
goto err_kfree_txhdr_cache;
err = dmacontroller_setup(ring);
if (err)
goto err_free_ringmemory;
out:
return ring;
err_free_ringmemory:
free_ringmemory(ring);
err_kfree_txhdr_cache:
kfree(ring->txhdr_cache);
err_kfree_meta:
kfree(ring->meta);
err_kfree_ring:
kfree(ring);
ring = NULL;
goto out;
}
/* Main cleanup function. */
static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
{
if (!ring)
return;
b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
" %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
(ring->tx) ? "TX" : "RX", ring->max_used_slots,
ring->nr_slots);
/* Device IRQs are disabled prior to entering this function,
* so there is no need to worry about concurrency with the RX handler.
*/
dmacontroller_cleanup(ring);
free_all_descbuffers(ring);
free_ringmemory(ring);
kfree(ring->txhdr_cache);
kfree(ring->meta);
kfree(ring);
}
void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
struct b43legacy_dma *dma;
if (b43legacy_using_pio(dev))
return;
dma = &dev->dma;
b43legacy_destroy_dmaring(dma->rx_ring3);
dma->rx_ring3 = NULL;
b43legacy_destroy_dmaring(dma->rx_ring0);
dma->rx_ring0 = NULL;
b43legacy_destroy_dmaring(dma->tx_ring5);
dma->tx_ring5 = NULL;
b43legacy_destroy_dmaring(dma->tx_ring4);
dma->tx_ring4 = NULL;
b43legacy_destroy_dmaring(dma->tx_ring3);
dma->tx_ring3 = NULL;
b43legacy_destroy_dmaring(dma->tx_ring2);
dma->tx_ring2 = NULL;
b43legacy_destroy_dmaring(dma->tx_ring1);
dma->tx_ring1 = NULL;
b43legacy_destroy_dmaring(dma->tx_ring0);
dma->tx_ring0 = NULL;
}
static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
{
u64 orig_mask = mask;
bool fallback = false;
int err;
/* Try to set the DMA mask. If it fails, try falling back to a
* lower mask, as we can always also support a lower one. */
while (1) {
err = dma_set_mask(dev->dev->dma_dev, mask);
if (!err) {
err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
if (!err)
break;
}
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_BIT_MASK(32);
fallback = true;
continue;
}
if (mask == DMA_BIT_MASK(32)) {
mask = DMA_BIT_MASK(30);
fallback = true;
continue;
}
b43legacyerr(dev->wl, "The machine/kernel does not support "
"the required %u-bit DMA mask\n",
(unsigned int)dma_mask_to_engine_type(orig_mask));
return -EOPNOTSUPP;
}
if (fallback) {
b43legacyinfo(dev->wl, "DMA mask fallback from %u-bit to %u-"
"bit\n",
(unsigned int)dma_mask_to_engine_type(orig_mask),
(unsigned int)dma_mask_to_engine_type(mask));
}
return 0;
}
int b43legacy_dma_init(struct b43legacy_wldev *dev)
{
struct b43legacy_dma *dma = &dev->dma;
struct b43legacy_dmaring *ring;
int err;
u64 dmamask;
enum b43legacy_dmatype type;
dmamask = supported_dma_mask(dev);
type = dma_mask_to_engine_type(dmamask);
err = b43legacy_dma_set_mask(dev, dmamask);
if (err) {
#ifdef CONFIG_B43LEGACY_PIO
b43legacywarn(dev->wl, "DMA for this device not supported. "
"Falling back to PIO\n");
dev->__using_pio = true;
return -EAGAIN;
#else
b43legacyerr(dev->wl, "DMA for this device not supported and "
"no PIO support compiled in\n");
return -EOPNOTSUPP;
#endif
}
dma->translation = ssb_dma_translation(dev->dev);
err = -ENOMEM;
/* setup TX DMA channels. */
ring = b43legacy_setup_dmaring(dev, 0, 1, type);
if (!ring)
goto out;
dma->tx_ring0 = ring;
ring = b43legacy_setup_dmaring(dev, 1, 1, type);
if (!ring)
goto err_destroy_tx0;
dma->tx_ring1 = ring;
ring = b43legacy_setup_dmaring(dev, 2, 1, type);
if (!ring)
goto err_destroy_tx1;
dma->tx_ring2 = ring;
ring = b43legacy_setup_dmaring(dev, 3, 1, type);
if (!ring)
goto err_destroy_tx2;
dma->tx_ring3 = ring;
ring = b43legacy_setup_dmaring(dev, 4, 1, type);
if (!ring)
goto err_destroy_tx3;
dma->tx_ring4 = ring;
ring = b43legacy_setup_dmaring(dev, 5, 1, type);
if (!ring)
goto err_destroy_tx4;
dma->tx_ring5 = ring;
/* setup RX DMA channels. */
ring = b43legacy_setup_dmaring(dev, 0, 0, type);
if (!ring)
goto err_destroy_tx5;
dma->rx_ring0 = ring;
if (dev->dev->id.revision < 5) {
ring = b43legacy_setup_dmaring(dev, 3, 0, type);
if (!ring)
goto err_destroy_rx0;
dma->rx_ring3 = ring;
}
b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
err = 0;
out:
return err;
err_destroy_rx0:
b43legacy_destroy_dmaring(dma->rx_ring0);
dma->rx_ring0 = NULL;
err_destroy_tx5:
b43legacy_destroy_dmaring(dma->tx_ring5);
dma->tx_ring5 = NULL;
err_destroy_tx4:
b43legacy_destroy_dmaring(dma->tx_ring4);
dma->tx_ring4 = NULL;
err_destroy_tx3:
b43legacy_destroy_dmaring(dma->tx_ring3);
dma->tx_ring3 = NULL;
err_destroy_tx2:
b43legacy_destroy_dmaring(dma->tx_ring2);
dma->tx_ring2 = NULL;
err_destroy_tx1:
b43legacy_destroy_dmaring(dma->tx_ring1);
dma->tx_ring1 = NULL;
err_destroy_tx0:
b43legacy_destroy_dmaring(dma->tx_ring0);
dma->tx_ring0 = NULL;
goto out;
}
/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43legacy_dmaring *ring,
int slot)
{
u16 cookie = 0x1000;
/* Use the upper 4 bits of the cookie as
* DMA controller ID and store the slot number
* in the lower 12 bits.
* Note that the cookie must never be 0, as this
* is a special value used in RX path.
*/
switch (ring->index) {
case 0:
cookie = 0xA000;
break;
case 1:
cookie = 0xB000;
break;
case 2:
cookie = 0xC000;
break;
case 3:
cookie = 0xD000;
break;
case 4:
cookie = 0xE000;
break;
case 5:
cookie = 0xF000;
break;
}
B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
cookie |= (u16)slot;
return cookie;
}
/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
u16 cookie, int *slot)
{
struct b43legacy_dma *dma = &dev->dma;
struct b43legacy_dmaring *ring = NULL;
switch (cookie & 0xF000) {
case 0xA000:
ring = dma->tx_ring0;
break;
case 0xB000:
ring = dma->tx_ring1;
break;
case 0xC000:
ring = dma->tx_ring2;
break;
case 0xD000:
ring = dma->tx_ring3;
break;
case 0xE000:
ring = dma->tx_ring4;
break;
case 0xF000:
ring = dma->tx_ring5;
break;
default:
B43legacy_WARN_ON(1);
}
*slot = (cookie & 0x0FFF);
B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));
return ring;
}
static int dma_tx_fragment(struct b43legacy_dmaring *ring,
struct sk_buff **in_skb)
{
struct sk_buff *skb = *in_skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
u8 *header;
int slot, old_top_slot, old_used_slots;
int err;
struct b43legacy_dmadesc32 *desc;
struct b43legacy_dmadesc_meta *meta;
struct b43legacy_dmadesc_meta *meta_hdr;
struct sk_buff *bounce_skb;
#define SLOTS_PER_PACKET 2
B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);
old_top_slot = ring->current_slot;
old_used_slots = ring->used_slots;
/* Get a slot for the header. */
slot = request_slot(ring);
desc = op32_idx2desc(ring, slot, &meta_hdr);
memset(meta_hdr, 0, sizeof(*meta_hdr));
header = &(ring->txhdr_cache[slot * sizeof(
struct b43legacy_txhdr_fw3)]);
err = b43legacy_generate_txhdr(ring->dev, header,
skb->data, skb->len, info,
generate_cookie(ring, slot));
if (unlikely(err)) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
return err;
}
meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
sizeof(struct b43legacy_txhdr_fw3), 1);
if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
sizeof(struct b43legacy_txhdr_fw3), 1)) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
return -EIO;
}
op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr,
sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);
/* Get a slot for the payload. */
slot = request_slot(ring);
desc = op32_idx2desc(ring, slot, &meta);
memset(meta, 0, sizeof(*meta));
meta->skb = skb;
meta->is_last_fragment = true;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
/* create a bounce buffer in zone_dma on mapping failure. */
if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
err = -ENOMEM;
goto out_unmap_hdr;
}
memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
bounce_skb->dev = skb->dev;
skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
info = IEEE80211_SKB_CB(bounce_skb);
dev_kfree_skb_any(skb);
skb = bounce_skb;
*in_skb = bounce_skb;
meta->skb = skb;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
err = -EIO;
goto out_free_bounce;
}
}
op32_fill_descriptor(ring, desc, meta->dmaaddr,
skb->len, 0, 1, 1);
wmb(); /* previous stuff MUST be done */
/* Now transfer the whole frame. */
op32_poke_tx(ring, next_slot(ring, slot));
return 0;
out_free_bounce:
dev_kfree_skb_any(skb);
out_unmap_hdr:
unmap_descbuffer(ring, meta_hdr->dmaaddr,
sizeof(struct b43legacy_txhdr_fw3), 1);
return err;
}
static inline
int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
if (unlikely(b43legacy_debug(ring->dev,
B43legacy_DBG_DMAOVERFLOW))) {
/* Check if we should inject another ringbuffer overflow
* to test handling of this situation in the stack. */
unsigned long next_overflow;
next_overflow = ring->last_injected_overflow + HZ;
if (time_after(jiffies, next_overflow)) {
ring->last_injected_overflow = jiffies;
b43legacydbg(ring->dev->wl,
"Injecting TX ring overflow on "
"DMA controller %d\n", ring->index);
return 1;
}
}
#endif /* CONFIG_B43LEGACY_DEBUG */
return 0;
}
int b43legacy_dma_tx(struct b43legacy_wldev *dev,
struct sk_buff *skb)
{
struct b43legacy_dmaring *ring;
int err = 0;
ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
B43legacy_WARN_ON(!ring->tx);
if (unlikely(ring->stopped)) {
/* We get here only because of a bug in mac80211.
* Because of a race, one packet may be queued after
* the queue is stopped, thus we got called when we shouldn't.
* For now, just refuse the transmit. */
if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
b43legacyerr(dev->wl, "Packet after queue stopped\n");
return -ENOSPC;
}
if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) {
/* If we get here, we have a real error: the queue is
* full, but was never stopped. */
b43legacyerr(dev->wl, "DMA queue overflow\n");
return -ENOSPC;
}
/* dma_tx_fragment() might reallocate the skb, so any pointers into
* the skb data or cb must be treated as invalid from here on. */
err = dma_tx_fragment(ring, &skb);
if (unlikely(err == -ENOKEY)) {
/* Drop this packet, as we don't have the encryption key
* anymore and must not transmit it unencrypted. */
dev_kfree_skb_any(skb);
return 0;
}
if (unlikely(err)) {
b43legacyerr(dev->wl, "DMA tx mapping failure\n");
return err;
}
if ((free_slots(ring) < SLOTS_PER_PACKET) ||
should_inject_overflow(ring)) {
/* This TX ring is full. */
unsigned int skb_mapping = skb_get_queue_mapping(skb);
ieee80211_stop_queue(dev->wl->hw, skb_mapping);
dev->wl->tx_queue_stopped[skb_mapping] = 1;
ring->stopped = true;
if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
b43legacydbg(dev->wl, "Stopped TX ring %d\n",
ring->index);
}
return err;
}
void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
const struct b43legacy_txstatus *status)
{
struct b43legacy_dmaring *ring;
struct b43legacy_dmadesc_meta *meta;
int retry_limit;
int slot;
int firstused;
ring = parse_cookie(dev, status->cookie, &slot);
if (unlikely(!ring))
return;
B43legacy_WARN_ON(!ring->tx);
/* Sanity check: TX packets are processed in-order on one ring.
* Check if the slot deduced from the cookie really is the first
* used slot. */
firstused = ring->current_slot - ring->used_slots + 1;
if (firstused < 0)
firstused = ring->nr_slots + firstused;
if (unlikely(slot != firstused)) {
/* This is possibly a firmware bug and will result in
* malfunction, memory leaks and/or a stall of DMA functionality.
*/
b43legacydbg(dev->wl, "Out of order TX status report on DMA "
"ring %d. Expected %d, but got %d\n",
ring->index, firstused, slot);
return;
}
while (1) {
B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
op32_idx2desc(ring, slot, &meta);
if (meta->skb)
unmap_descbuffer(ring, meta->dmaaddr,
meta->skb->len, 1);
else
unmap_descbuffer(ring, meta->dmaaddr,
sizeof(struct b43legacy_txhdr_fw3),
1);
if (meta->is_last_fragment) {
struct ieee80211_tx_info *info;
BUG_ON(!meta->skb);
info = IEEE80211_SKB_CB(meta->skb);
/* Preserve the configured retry limit before clearing the status;
* the xmit function has overwritten the rc's value with the actual
* retry count performed by the hardware. */
retry_limit = info->status.rates[0].count;
ieee80211_tx_info_clear_status(info);
if (status->acked)
info->flags |= IEEE80211_TX_STAT_ACK;
if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
/*
* If the short retries (RTS, not data frame) have exceeded
* the limit, the hw will not have tried the selected rate,
* but will have used the fallback rate instead.
* Don't let the rate control count attempts for the selected
* rate in this case, otherwise the statistics will be off.
*/
info->status.rates[0].count = 0;
info->status.rates[1].count = status->frame_count;
} else {
if (status->frame_count > retry_limit) {
info->status.rates[0].count = retry_limit;
info->status.rates[1].count = status->frame_count -
retry_limit;
} else {
info->status.rates[0].count = status->frame_count;
info->status.rates[1].idx = -1;
}
}
/* Call back to inform the ieee80211 subsystem about the
* status of the transmission.
* Some fields of txstat are already filled in dma_tx().
*/
ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
/* skb is freed by ieee80211_tx_status_irqsafe() */
meta->skb = NULL;
} else {
/* No need to call free_descriptor_buffer here, as
* this is only the txhdr, which is not allocated.
*/
B43legacy_WARN_ON(meta->skb != NULL);
}
/* Everything is unmapped and freed, so the slot is no longer in use. */
ring->used_slots--;
if (meta->is_last_fragment)
break;
slot = next_slot(ring, slot);
}
dev->stats.last_tx = jiffies;
if (ring->stopped) {
B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
ring->stopped = false;
}
if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
} else {
/* If the driver queue is running, wake the corresponding
* mac80211 queue. */
ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
b43legacydbg(dev->wl, "Woke up TX ring %d\n",
ring->index);
}
/* Add work to the queue. */
ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
static void dma_rx(struct b43legacy_dmaring *ring,
int *slot)
{
struct b43legacy_dmadesc32 *desc;
struct b43legacy_dmadesc_meta *meta;
struct b43legacy_rxhdr_fw3 *rxhdr;
struct sk_buff *skb;
u16 len;
int err;
dma_addr_t dmaaddr;
desc = op32_idx2desc(ring, *slot, &meta);
sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
skb = meta->skb;
if (ring->index == 3) {
/* We received an xmit status. */
struct b43legacy_hwtxstatus *hw =
(struct b43legacy_hwtxstatus *)skb->data;
int i = 0;
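/* The DMA write of the status may still be in flight; poll briefly
* until the cookie becomes nonzero (0 is never a valid cookie). */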
while (hw->cookie == 0) {
if (i > 100)
break;
i++;
udelay(2);
barrier();
}
b43legacy_handle_hwtxstatus(ring->dev, hw);
/* recycle the descriptor buffer. */
sync_descbuffer_for_device(ring, meta->dmaaddr,
ring->rx_buffersize);
return;
}
rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
len = le16_to_cpu(rxhdr->frame_len);
if (len == 0) {
int i = 0;
do {
udelay(2);
barrier();
len = le16_to_cpu(rxhdr->frame_len);
} while (len == 0 && i++ < 5);
if (unlikely(len == 0)) {
/* recycle the descriptor buffer. */
sync_descbuffer_for_device(ring, meta->dmaaddr,
ring->rx_buffersize);
goto drop;
}
}
if (unlikely(len > ring->rx_buffersize)) {
/* The data did not fit into one descriptor buffer
* and is split over multiple buffers.
* This should never happen, as we try to allocate buffers
* big enough. So simply ignore this packet.
*/
int cnt = 0;
s32 tmp = len;
while (1) {
desc = op32_idx2desc(ring, *slot, &meta);
/* recycle the descriptor buffer. */
sync_descbuffer_for_device(ring, meta->dmaaddr,
ring->rx_buffersize);
*slot = next_slot(ring, *slot);
cnt++;
tmp -= ring->rx_buffersize;
if (tmp <= 0)
break;
}
b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
"(len: %u, buffer: %u, nr-dropped: %d)\n",
len, ring->rx_buffersize, cnt);
goto drop;
}
dmaaddr = meta->dmaaddr;
err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
if (unlikely(err)) {
b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
" failed\n");
sync_descbuffer_for_device(ring, dmaaddr,
ring->rx_buffersize);
goto drop;
}
unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
skb_put(skb, len + ring->frameoffset);
skb_pull(skb, ring->frameoffset);
b43legacy_rx(ring->dev, skb, rxhdr);
drop:
return;
}
void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
{
int slot;
int current_slot;
int used_slots = 0;
B43legacy_WARN_ON(ring->tx);
current_slot = op32_get_current_rxslot(ring);
B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
ring->nr_slots));
slot = ring->current_slot;
for (; slot != current_slot; slot = next_slot(ring, slot)) {
dma_rx(ring, &slot);
update_max_used_slots(ring, ++used_slots);
}
op32_set_current_rxslot(ring, slot);
ring->current_slot = slot;
}
static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
B43legacy_WARN_ON(!ring->tx);
op32_tx_suspend(ring);
}
static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
B43legacy_WARN_ON(!ring->tx);
op32_tx_resume(ring);
}
void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
{
b43legacy_power_saving_ctl_bits(dev, -1, 1);
b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
}
void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
{
b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
b43legacy_power_saving_ctl_bits(dev, -1, -1);
}
| gpl-2.0 |
CyanogenMod/android_kernel_motorola_msm8610 | arch/arm/mach-s3c64xx/cpuidle.c | 4738 | 2200 | /* linux/arch/arm/mach-s3c64xx/cpuidle.c
*
* Copyright (c) 2011 Wolfson Microelectronics, plc
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/cpuidle.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/time.h>
#include <asm/proc-fns.h>
#include <mach/map.h>
#include <mach/regs-sys.h>
#include <mach/regs-syscon-power.h>
static int s3c64xx_enter_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
struct timeval before, after;
unsigned long tmp;
int idle_time;
local_irq_disable();
do_gettimeofday(&before);
/* Setup PWRCFG to enter idle mode */
tmp = __raw_readl(S3C64XX_PWR_CFG);
tmp &= ~S3C64XX_PWRCFG_CFG_WFI_MASK;
tmp |= S3C64XX_PWRCFG_CFG_WFI_IDLE;
__raw_writel(tmp, S3C64XX_PWR_CFG);
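/* With PWRCFG set up, the WFI issued by cpu_do_idle() enters the
* configured IDLE mode instead of a plain clock-gated wait. */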
cpu_do_idle();
do_gettimeofday(&after);
local_irq_enable();
idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
(after.tv_usec - before.tv_usec);
dev->last_residency = idle_time;
return index;
}
static struct cpuidle_state s3c64xx_cpuidle_set[] = {
[0] = {
.enter = s3c64xx_enter_idle,
.exit_latency = 1,
.target_residency = 1,
.flags = CPUIDLE_FLAG_TIME_VALID,
.name = "IDLE",
.desc = "System active, ARM gated",
},
};
static struct cpuidle_driver s3c64xx_cpuidle_driver = {
.name = "s3c64xx_cpuidle",
.owner = THIS_MODULE,
.state_count = ARRAY_SIZE(s3c64xx_cpuidle_set),
};
static struct cpuidle_device s3c64xx_cpuidle_device = {
.state_count = ARRAY_SIZE(s3c64xx_cpuidle_set),
};
static int __init s3c64xx_init_cpuidle(void)
{
int ret;
memcpy(s3c64xx_cpuidle_driver.states, s3c64xx_cpuidle_set,
sizeof(s3c64xx_cpuidle_set));
cpuidle_register_driver(&s3c64xx_cpuidle_driver);
ret = cpuidle_register_device(&s3c64xx_cpuidle_device);
if (ret) {
pr_err("Failed to register cpuidle device: %d\n", ret);
return ret;
}
return 0;
}
device_initcall(s3c64xx_init_cpuidle);
| gpl-2.0 |
Split-Screen/android_kernel_lge_hammerhead | arch/arm/mach-s3c24xx/dma-s3c2443.c | 4738 | 4140 | /* linux/arch/arm/mach-s3c2443/dma.c
*
* Copyright (c) 2007 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C2443 DMA selection
*
* http://armlinux.simtec.co.uk/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/serial_core.h>
#include <linux/io.h>
#include <mach/dma.h>
#include <plat/dma-s3c24xx.h>
#include <plat/cpu.h>
#include <plat/regs-serial.h>
#include <mach/regs-gpio.h>
#include <plat/regs-ac97.h>
#include <plat/regs-dma.h>
#include <mach/regs-mem.h>
#include <mach/regs-lcd.h>
#include <mach/regs-sdi.h>
#include <plat/regs-iis.h>
#include <plat/regs-spi.h>
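/* Every request source below may be routed to any of the six DMA
* channels, so mark all of them valid for each source. */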
#define MAP(x) { \
[0] = (x) | DMA_CH_VALID, \
[1] = (x) | DMA_CH_VALID, \
[2] = (x) | DMA_CH_VALID, \
[3] = (x) | DMA_CH_VALID, \
[4] = (x) | DMA_CH_VALID, \
[5] = (x) | DMA_CH_VALID, \
}
static struct s3c24xx_dma_map __initdata s3c2443_dma_mappings[] = {
[DMACH_XD0] = {
.name = "xdreq0",
.channels = MAP(S3C2443_DMAREQSEL_XDREQ0),
},
[DMACH_XD1] = {
.name = "xdreq1",
.channels = MAP(S3C2443_DMAREQSEL_XDREQ1),
},
[DMACH_SDI] = { /* only on S3C2443 */
.name = "sdi",
.channels = MAP(S3C2443_DMAREQSEL_SDI),
},
[DMACH_SPI0] = {
.name = "spi0",
.channels = MAP(S3C2443_DMAREQSEL_SPI0TX),
},
[DMACH_SPI1] = { /* only on S3C2443/S3C2450 */
.name = "spi1",
.channels = MAP(S3C2443_DMAREQSEL_SPI1TX),
},
[DMACH_UART0] = {
.name = "uart0",
.channels = MAP(S3C2443_DMAREQSEL_UART0_0),
},
[DMACH_UART1] = {
.name = "uart1",
.channels = MAP(S3C2443_DMAREQSEL_UART1_0),
},
[DMACH_UART2] = {
.name = "uart2",
.channels = MAP(S3C2443_DMAREQSEL_UART2_0),
},
[DMACH_UART3] = {
.name = "uart3",
.channels = MAP(S3C2443_DMAREQSEL_UART3_0),
},
[DMACH_UART0_SRC2] = {
.name = "uart0",
.channels = MAP(S3C2443_DMAREQSEL_UART0_1),
},
[DMACH_UART1_SRC2] = {
.name = "uart1",
.channels = MAP(S3C2443_DMAREQSEL_UART1_1),
},
[DMACH_UART2_SRC2] = {
.name = "uart2",
.channels = MAP(S3C2443_DMAREQSEL_UART2_1),
},
[DMACH_UART3_SRC2] = {
.name = "uart3",
.channels = MAP(S3C2443_DMAREQSEL_UART3_1),
},
[DMACH_TIMER] = {
.name = "timer",
.channels = MAP(S3C2443_DMAREQSEL_TIMER),
},
[DMACH_I2S_IN] = {
.name = "i2s-sdi",
.channels = MAP(S3C2443_DMAREQSEL_I2SRX),
},
[DMACH_I2S_OUT] = {
.name = "i2s-sdo",
.channels = MAP(S3C2443_DMAREQSEL_I2STX),
},
[DMACH_PCM_IN] = {
.name = "pcm-in",
.channels = MAP(S3C2443_DMAREQSEL_PCMIN),
},
[DMACH_PCM_OUT] = {
.name = "pcm-out",
.channels = MAP(S3C2443_DMAREQSEL_PCMOUT),
},
[DMACH_MIC_IN] = {
.name = "mic-in",
.channels = MAP(S3C2443_DMAREQSEL_MICIN),
},
};
static void s3c2443_dma_select(struct s3c2410_dma_chan *chan,
struct s3c24xx_dma_map *map)
{
writel(map->channels[0] | S3C2443_DMAREQSEL_HW,
chan->regs + S3C2443_DMA_DMAREQSEL);
}
static struct s3c24xx_dma_selection __initdata s3c2443_dma_sel = {
.select = s3c2443_dma_select,
.dcon_mask = 0,
.map = s3c2443_dma_mappings,
.map_size = ARRAY_SIZE(s3c2443_dma_mappings),
};
static int __init s3c2443_dma_add(struct device *dev,
struct subsys_interface *sif)
{
s3c24xx_dma_init(6, IRQ_S3C2443_DMA0, 0x100);
return s3c24xx_dma_init_map(&s3c2443_dma_sel);
}
#ifdef CONFIG_CPU_S3C2416
/* S3C2416 DMA contains the same selection table as the S3C2443 */
static struct subsys_interface s3c2416_dma_interface = {
.name = "s3c2416_dma",
.subsys = &s3c2416_subsys,
.add_dev = s3c2443_dma_add,
};
static int __init s3c2416_dma_init(void)
{
return subsys_interface_register(&s3c2416_dma_interface);
}
arch_initcall(s3c2416_dma_init);
#endif
#ifdef CONFIG_CPU_S3C2443
static struct subsys_interface s3c2443_dma_interface = {
.name = "s3c2443_dma",
.subsys = &s3c2443_subsys,
.add_dev = s3c2443_dma_add,
};
static int __init s3c2443_dma_init(void)
{
return subsys_interface_register(&s3c2443_dma_interface);
}
arch_initcall(s3c2443_dma_init);
#endif
| gpl-2.0 |
infected-lp/kernel_sony_msm8974 | drivers/gpu/drm/radeon/radeon_cp.c | 5250 | 66569 | /* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */
/*
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Fremont, California.
* Copyright 2007 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*/
#include <linux/module.h>
#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
#include "r300_reg.h"
#define RADEON_FIFO_DEBUG 0
/* Firmware Names */
#define FIRMWARE_R100 "radeon/R100_cp.bin"
#define FIRMWARE_R200 "radeon/R200_cp.bin"
#define FIRMWARE_R300 "radeon/R300_cp.bin"
#define FIRMWARE_R420 "radeon/R420_cp.bin"
#define FIRMWARE_RS690 "radeon/RS690_cp.bin"
#define FIRMWARE_RS600 "radeon/RS600_cp.bin"
#define FIRMWARE_R520 "radeon/R520_cp.bin"
MODULE_FIRMWARE(FIRMWARE_R100);
MODULE_FIRMWARE(FIRMWARE_R200);
MODULE_FIRMWARE(FIRMWARE_R300);
MODULE_FIRMWARE(FIRMWARE_R420);
MODULE_FIRMWARE(FIRMWARE_RS690);
MODULE_FIRMWARE(FIRMWARE_RS600);
MODULE_FIRMWARE(FIRMWARE_R520);
static int radeon_do_cleanup_cp(struct drm_device * dev);
static void radeon_do_cp_start(drm_radeon_private_t * dev_priv);
u32 radeon_read_ring_rptr(drm_radeon_private_t *dev_priv, u32 off)
{
u32 val;
if (dev_priv->flags & RADEON_IS_AGP) {
val = DRM_READ32(dev_priv->ring_rptr, off);
} else {
val = *(((volatile u32 *)
dev_priv->ring_rptr->handle) +
(off / sizeof(u32)));
val = le32_to_cpu(val);
}
return val;
}
u32 radeon_get_ring_head(drm_radeon_private_t *dev_priv)
{
if (dev_priv->writeback_works)
return radeon_read_ring_rptr(dev_priv, 0);
else {
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
return RADEON_READ(R600_CP_RB_RPTR);
else
return RADEON_READ(RADEON_CP_RB_RPTR);
}
}
void radeon_write_ring_rptr(drm_radeon_private_t *dev_priv, u32 off, u32 val)
{
if (dev_priv->flags & RADEON_IS_AGP)
DRM_WRITE32(dev_priv->ring_rptr, off, val);
else
*(((volatile u32 *) dev_priv->ring_rptr->handle) +
(off / sizeof(u32))) = cpu_to_le32(val);
}
void radeon_set_ring_head(drm_radeon_private_t *dev_priv, u32 val)
{
radeon_write_ring_rptr(dev_priv, 0, val);
}
u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index)
{
if (dev_priv->writeback_works) {
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
return radeon_read_ring_rptr(dev_priv,
R600_SCRATCHOFF(index));
else
return radeon_read_ring_rptr(dev_priv,
RADEON_SCRATCHOFF(index));
} else {
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
return RADEON_READ(R600_SCRATCH_REG0 + 4*index);
else
return RADEON_READ(RADEON_SCRATCH_REG0 + 4*index);
}
}
u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr)
{
u32 ret;
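/* Registers beyond the 64K directly-mapped window are reached
* indirectly through the MM_INDEX/MM_DATA register pair. */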
if (addr < 0x10000)
ret = DRM_READ32(dev_priv->mmio, addr);
else {
DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, addr);
ret = DRM_READ32(dev_priv->mmio, RADEON_MM_DATA);
}
return ret;
}
static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
{
u32 ret;
RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff));
ret = RADEON_READ(R520_MC_IND_DATA);
RADEON_WRITE(R520_MC_IND_INDEX, 0);
return ret;
}
static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
{
u32 ret;
RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff);
ret = RADEON_READ(RS480_NB_MC_DATA);
RADEON_WRITE(RS480_NB_MC_INDEX, 0xff);
return ret;
}
static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
{
u32 ret;
RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK));
ret = RADEON_READ(RS690_MC_DATA);
RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
return ret;
}
static u32 RS600_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
{
u32 ret;
RADEON_WRITE(RS600_MC_INDEX, ((addr & RS600_MC_ADDR_MASK) |
RS600_MC_IND_CITF_ARB0));
ret = RADEON_READ(RS600_MC_DATA);
return ret;
}
static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
{
if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
return RS690_READ_MCIND(dev_priv, addr);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
return RS600_READ_MCIND(dev_priv, addr);
else
return RS480_READ_MCIND(dev_priv, addr);
}
u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
{
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
return RADEON_READ(R700_MC_VM_FB_LOCATION);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
return RADEON_READ(R600_MC_VM_FB_LOCATION);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
return RS600_READ_MCIND(dev_priv, RS600_MC_FB_LOCATION);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
else
return RADEON_READ(RADEON_MC_FB_LOCATION);
}
static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
{
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
RADEON_WRITE(R700_MC_VM_FB_LOCATION, fb_loc);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
RADEON_WRITE(R600_MC_VM_FB_LOCATION, fb_loc);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
RS600_WRITE_MCIND(RS600_MC_FB_LOCATION, fb_loc);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
else
RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc);
}
void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc)
{
/* R6xx/R7xx: AGP_TOP and BOT are actually 18 bits each */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
RADEON_WRITE(R700_MC_VM_AGP_BOT, agp_loc & 0xffff); /* FIX ME */
RADEON_WRITE(R700_MC_VM_AGP_TOP, (agp_loc >> 16) & 0xffff);
} else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
RADEON_WRITE(R600_MC_VM_AGP_BOT, agp_loc & 0xffff); /* FIX ME */
RADEON_WRITE(R600_MC_VM_AGP_TOP, (agp_loc >> 16) & 0xffff);
} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
RS600_WRITE_MCIND(RS600_MC_AGP_LOCATION, agp_loc);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
else
RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
}
void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
{
u32 agp_base_hi = upper_32_bits(agp_base);
u32 agp_base_lo = agp_base & 0xffffffff;
u32 r6xx_agp_base = (agp_base >> 22) & 0x3ffff;
/* R6xx/R7xx must be aligned to a 4MB boundary */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
RADEON_WRITE(R700_MC_VM_AGP_BASE, r6xx_agp_base);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
RADEON_WRITE(R600_MC_VM_AGP_BASE, r6xx_agp_base);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) {
RS600_WRITE_MCIND(RS600_AGP_BASE, agp_base_lo);
RS600_WRITE_MCIND(RS600_AGP_BASE_2, agp_base_hi);
} else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi);
} else {
RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi);
}
}
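/*
* Illustrative note (example values, not driver code): on R6xx/R7xx the
* AGP base register takes the address in 4MB units, which is what the
* (agp_base >> 22) & 0x3ffff above computes. E.g. agp_base = 0xd0000000
* encodes as 0xd0000000 >> 22 = 0x340.
*/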
void radeon_enable_bm(struct drm_radeon_private *dev_priv)
{
u32 tmp;
/* Turn on bus mastering */
if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
/* rs600/rs690/rs740 */
tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
RADEON_WRITE(RADEON_BUS_CNTL, tmp);
} else if (((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV350) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
/* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
RADEON_WRITE(RADEON_BUS_CNTL, tmp);
} /* PCIe cards appear not to need this */
}
static int RADEON_READ_PLL(struct drm_device * dev, int addr)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f);
return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
}
static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
{
RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
return RADEON_READ(RADEON_PCIE_DATA);
}
#if RADEON_FIFO_DEBUG
static void radeon_status(drm_radeon_private_t * dev_priv)
{
printk("%s:\n", __func__);
printk("RBBM_STATUS = 0x%08x\n",
(unsigned int)RADEON_READ(RADEON_RBBM_STATUS));
printk("CP_RB_RTPR = 0x%08x\n",
(unsigned int)RADEON_READ(RADEON_CP_RB_RPTR));
printk("CP_RB_WTPR = 0x%08x\n",
(unsigned int)RADEON_READ(RADEON_CP_RB_WPTR));
printk("AIC_CNTL = 0x%08x\n",
(unsigned int)RADEON_READ(RADEON_AIC_CNTL));
printk("AIC_STAT = 0x%08x\n",
(unsigned int)RADEON_READ(RADEON_AIC_STAT));
printk("AIC_PT_BASE = 0x%08x\n",
(unsigned int)RADEON_READ(RADEON_AIC_PT_BASE));
printk("TLB_ADDR = 0x%08x\n",
(unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR));
printk("TLB_DATA = 0x%08x\n",
(unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA));
}
#endif
/* ================================================================
* Engine, FIFO control
*/
static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
{
u32 tmp;
int i;
dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {
tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT);
tmp |= RADEON_RB3D_DC_FLUSH_ALL;
RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp);
for (i = 0; i < dev_priv->usec_timeout; i++) {
if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT)
& RADEON_RB3D_DC_BUSY)) {
return 0;
}
DRM_UDELAY(1);
}
} else {
/* don't flush or purge the cache here, or the chip locks up */
return 0;
}
#if RADEON_FIFO_DEBUG
DRM_ERROR("failed!\n");
radeon_status(dev_priv);
#endif
return -EBUSY;
}
static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
{
int i;
dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
for (i = 0; i < dev_priv->usec_timeout; i++) {
int slots = (RADEON_READ(RADEON_RBBM_STATUS)
& RADEON_RBBM_FIFOCNT_MASK);
if (slots >= entries)
return 0;
DRM_UDELAY(1);
}
DRM_DEBUG("wait for fifo failed status : 0x%08X 0x%08X\n",
RADEON_READ(RADEON_RBBM_STATUS),
RADEON_READ(R300_VAP_CNTL_STATUS));
#if RADEON_FIFO_DEBUG
DRM_ERROR("failed!\n");
radeon_status(dev_priv);
#endif
return -EBUSY;
}
static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
{
int i, ret;
dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
ret = radeon_do_wait_for_fifo(dev_priv, 64);
if (ret)
return ret;
for (i = 0; i < dev_priv->usec_timeout; i++) {
if (!(RADEON_READ(RADEON_RBBM_STATUS)
& RADEON_RBBM_ACTIVE)) {
radeon_do_pixcache_flush(dev_priv);
return 0;
}
DRM_UDELAY(1);
}
DRM_DEBUG("wait idle failed status : 0x%08X 0x%08X\n",
RADEON_READ(RADEON_RBBM_STATUS),
RADEON_READ(R300_VAP_CNTL_STATUS));
#if RADEON_FIFO_DEBUG
DRM_ERROR("failed!\n");
radeon_status(dev_priv);
#endif
return -EBUSY;
}
static void radeon_init_pipes(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
uint32_t gb_tile_config, gb_pipe_sel = 0;
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
uint32_t z_pipe_sel = RADEON_READ(RV530_GB_PIPE_SELECT2);
if ((z_pipe_sel & 3) == 3)
dev_priv->num_z_pipes = 2;
else
dev_priv->num_z_pipes = 1;
} else
dev_priv->num_z_pipes = 1;
/* RS4xx/RS6xx/R4xx/R5xx */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
/* SE cards have 1 pipe */
if ((dev->pdev->device == 0x5e4c) ||
(dev->pdev->device == 0x5e4f))
dev_priv->num_gb_pipes = 1;
} else {
/* R3xx */
if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
dev->pdev->device != 0x4144) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350 &&
dev->pdev->device != 0x4148)) {
dev_priv->num_gb_pipes = 2;
} else {
/* RV3xx/R300 AD/R350 AH */
dev_priv->num_gb_pipes = 1;
}
}
DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes);
gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/);
switch (dev_priv->num_gb_pipes) {
case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break;
case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break;
case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break;
default:
case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break;
}
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4));
RADEON_WRITE(R300_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1));
}
RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config);
radeon_do_wait_for_idle(dev_priv);
RADEON_WRITE(R300_DST_PIPE_CONFIG, RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG);
RADEON_WRITE(R300_RB2D_DSTCACHE_MODE, (RADEON_READ(R300_RB2D_DSTCACHE_MODE) |
R300_DC_AUTOFLUSH_ENABLE |
R300_DC_DC_DISABLE_IGNORE_PE));
}
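/*
* Illustrative note (example only): R400_GB_PIPE_SELECT packs the pipe
* count into bits 13:12 as (count - 1). A readback of 0x3000 therefore
* decodes as ((0x3000 >> 12) & 0x3) + 1 = 4 pipes, matching the decode
* in radeon_init_pipes() above.
*/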
/* ================================================================
* CP control, initialization
*/
/* Load the microcode for the CP */
static int radeon_cp_init_microcode(drm_radeon_private_t *dev_priv)
{
struct platform_device *pdev;
const char *fw_name = NULL;
int err;
DRM_DEBUG("\n");
pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
err = IS_ERR(pdev);
if (err) {
printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
return -EINVAL;
}
if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) {
DRM_INFO("Loading R100 Microcode\n");
fw_name = FIRMWARE_R100;
} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) {
DRM_INFO("Loading R200 Microcode\n");
fw_name = FIRMWARE_R200;
} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
DRM_INFO("Loading R300 Microcode\n");
fw_name = FIRMWARE_R300;
} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R423) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) {
DRM_INFO("Loading R400 Microcode\n");
fw_name = FIRMWARE_R420;
} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
DRM_INFO("Loading RS690/RS740 Microcode\n");
fw_name = FIRMWARE_RS690;
} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) {
DRM_INFO("Loading RS600 Microcode\n");
fw_name = FIRMWARE_RS600;
} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) {
DRM_INFO("Loading R500 Microcode\n");
fw_name = FIRMWARE_R520;
}
err = request_firmware(&dev_priv->me_fw, fw_name, &pdev->dev);
platform_device_unregister(pdev);
if (err) {
printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
fw_name);
} else if (dev_priv->me_fw->size % 8) {
printk(KERN_ERR
"radeon_cp: Bogus length %zu in firmware \"%s\"\n",
dev_priv->me_fw->size, fw_name);
err = -EINVAL;
release_firmware(dev_priv->me_fw);
dev_priv->me_fw = NULL;
}
return err;
}
static void radeon_cp_load_microcode(drm_radeon_private_t *dev_priv)
{
const __be32 *fw_data;
int i, size;
radeon_do_wait_for_idle(dev_priv);
if (dev_priv->me_fw) {
size = dev_priv->me_fw->size / 4;
fw_data = (const __be32 *)&dev_priv->me_fw->data[0];
RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0);
for (i = 0; i < size; i += 2) {
RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
be32_to_cpup(&fw_data[i]));
RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
be32_to_cpup(&fw_data[i + 1]));
}
}
}
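/*
* Illustrative note (example only): the CP microcode image is a stream
* of big-endian 32-bit words consumed in pairs -- the even word goes to
* ME_RAM_DATAH and the odd word to ME_RAM_DATAL. A hypothetical 16-byte
* image { w0, w1, w2, w3 } would be loaded as
*
*   DATAH = be32(w0), DATAL = be32(w1), DATAH = be32(w2), DATAL = be32(w3)
*
* which is why radeon_cp_init_microcode() rejects any image whose size
* is not a multiple of 8 bytes.
*/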
/* Flush any pending commands to the CP. This should only be used just
* prior to a wait for idle, as it informs the engine that the command
* stream is ending.
*/
static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv)
{
DRM_DEBUG("\n");
#if 0
u32 tmp;
tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31);
RADEON_WRITE(RADEON_CP_RB_WPTR, tmp);
#endif
}
/* Wait for the CP to go idle.
*/
int radeon_do_cp_idle(drm_radeon_private_t * dev_priv)
{
RING_LOCALS;
DRM_DEBUG("\n");
BEGIN_RING(6);
RADEON_PURGE_CACHE();
RADEON_PURGE_ZCACHE();
RADEON_WAIT_UNTIL_IDLE();
ADVANCE_RING();
COMMIT_RING();
return radeon_do_wait_for_idle(dev_priv);
}
/* Start the Command Processor.
*/
static void radeon_do_cp_start(drm_radeon_private_t * dev_priv)
{
RING_LOCALS;
DRM_DEBUG("\n");
radeon_do_wait_for_idle(dev_priv);
RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode);
dev_priv->cp_running = 1;
/* On r420, any DMA from the CP to system memory while 2D is active
* can cause a hang. The workaround is to queue a CP RESYNC token.
*/
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) {
BEGIN_RING(3);
OUT_RING(CP_PACKET0(R300_CP_RESYNC_ADDR, 1));
OUT_RING(5); /* scratch reg 5 */
OUT_RING(0xdeadbeef);
ADVANCE_RING();
COMMIT_RING();
}
BEGIN_RING(8);
/* on r5xx, ISYNC_CNTL can only be written through the CP; write it here */
OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0));
OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
RADEON_PURGE_CACHE();
RADEON_PURGE_ZCACHE();
RADEON_WAIT_UNTIL_IDLE();
ADVANCE_RING();
COMMIT_RING();
dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
}
/* Reset the Command Processor. This will not flush any pending
* commands, so you must wait for the CP command stream to complete
* before calling this routine.
*/
static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv)
{
u32 cur_read_ptr;
DRM_DEBUG("\n");
cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
SET_RING_HEAD(dev_priv, cur_read_ptr);
dev_priv->ring.tail = cur_read_ptr;
}
/* Stop the Command Processor. This will not flush any pending
* commands, so you must flush the command stream and wait for the CP
* to go idle before calling this routine.
*/
static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv)
{
RING_LOCALS;
DRM_DEBUG("\n");
/* finish the pending CP_RESYNC token */
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) {
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
OUT_RING(R300_RB3D_DC_FINISH);
ADVANCE_RING();
COMMIT_RING();
radeon_do_wait_for_idle(dev_priv);
}
RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS);
dev_priv->cp_running = 0;
}
/* Reset the engine. This will stop the CP if it is running.
*/
static int radeon_do_engine_reset(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset;
DRM_DEBUG("\n");
radeon_do_pixcache_flush(dev_priv);
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
/* may need something similar for newer chips */
clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);
RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
RADEON_FORCEON_MCLKA |
RADEON_FORCEON_MCLKB |
RADEON_FORCEON_YCLKA |
RADEON_FORCEON_YCLKB |
RADEON_FORCEON_MC |
RADEON_FORCEON_AIC));
}
rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);
RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
RADEON_SOFT_RESET_CP |
RADEON_SOFT_RESET_HI |
RADEON_SOFT_RESET_SE |
RADEON_SOFT_RESET_RE |
RADEON_SOFT_RESET_PP |
RADEON_SOFT_RESET_E2 |
RADEON_SOFT_RESET_RB));
RADEON_READ(RADEON_RBBM_SOFT_RESET);
RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
~(RADEON_SOFT_RESET_CP |
RADEON_SOFT_RESET_HI |
RADEON_SOFT_RESET_SE |
RADEON_SOFT_RESET_RE |
RADEON_SOFT_RESET_PP |
RADEON_SOFT_RESET_E2 |
RADEON_SOFT_RESET_RB)));
RADEON_READ(RADEON_RBBM_SOFT_RESET);
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
}
/* setup the raster pipes */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
radeon_init_pipes(dev);
/* Reset the CP ring */
radeon_do_cp_reset(dev_priv);
/* The CP is no longer running after an engine reset */
dev_priv->cp_running = 0;
/* Reset any pending vertex, indirect buffers */
radeon_freelist_reset(dev);
return 0;
}
static void radeon_cp_init_ring_buffer(struct drm_device * dev,
drm_radeon_private_t *dev_priv,
struct drm_file *file_priv)
{
struct drm_radeon_master_private *master_priv;
u32 ring_start, cur_read_ptr;
/* Initialize the memory controller. With the new memory map, the fb
* location is not changed; it should already have been properly
* initialized. Part of the problem is that the code below is bogus:
* it assumes the GART is always appended to the fb, which is not
* necessarily the case.
*/
if (!dev_priv->new_memmap)
radeon_write_fb_location(dev_priv,
((dev_priv->gart_vm_start - 1) & 0xffff0000)
| (dev_priv->fb_location >> 16));
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
radeon_write_agp_base(dev_priv, dev->agp->base);
radeon_write_agp_location(dev_priv,
(((dev_priv->gart_vm_start - 1 +
dev_priv->gart_size) & 0xffff0000) |
(dev_priv->gart_vm_start >> 16)));
ring_start = (dev_priv->cp_ring->offset
- dev->agp->base
+ dev_priv->gart_vm_start);
} else
#endif
ring_start = (dev_priv->cp_ring->offset
- (unsigned long)dev->sg->virtual
+ dev_priv->gart_vm_start);
RADEON_WRITE(RADEON_CP_RB_BASE, ring_start);
/* Set the write pointer delay */
RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0);
/* Initialize the ring buffer's read and write pointers */
cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
SET_RING_HEAD(dev_priv, cur_read_ptr);
dev_priv->ring.tail = cur_read_ptr;
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
dev_priv->ring_rptr->offset
- dev->agp->base + dev_priv->gart_vm_start);
} else
#endif
{
RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
dev_priv->ring_rptr->offset
- ((unsigned long) dev->sg->virtual)
+ dev_priv->gart_vm_start);
}
/* Set ring buffer size */
#ifdef __BIG_ENDIAN
RADEON_WRITE(RADEON_CP_RB_CNTL,
RADEON_BUF_SWAP_32BIT |
(dev_priv->ring.fetch_size_l2ow << 18) |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#else
RADEON_WRITE(RADEON_CP_RB_CNTL,
(dev_priv->ring.fetch_size_l2ow << 18) |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#endif
/* Initialize the scratch register pointer. This will cause
* the scratch register values to be written out to memory
* whenever they are updated.
*
* We simply put this behind the ring read pointer; this works
* with PCI GART as well as (whatever kind of) AGP GART.
*/
RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR)
+ RADEON_SCRATCH_REG_OFFSET);
RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);
radeon_enable_bm(dev_priv);
radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(0), 0);
RADEON_WRITE(RADEON_LAST_FRAME_REG, 0);
radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1), 0);
RADEON_WRITE(RADEON_LAST_DISPATCH_REG, 0);
radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(2), 0);
RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0);
/* reset sarea copies of these */
master_priv = file_priv->master->driver_priv;
if (master_priv->sarea_priv) {
master_priv->sarea_priv->last_frame = 0;
master_priv->sarea_priv->last_dispatch = 0;
master_priv->sarea_priv->last_clear = 0;
}
radeon_do_wait_for_idle(dev_priv);
/* Sync everything up */
RADEON_WRITE(RADEON_ISYNC_CNTL,
(RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI));
}
static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
{
u32 tmp;
/* Start by assuming that writeback doesn't work */
dev_priv->writeback_works = 0;
/* Writeback doesn't seem to work everywhere; test it here and only
* enable it if it appears to work.
*/
radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1), 0);
RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef);
for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
u32 val;
val = radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1));
if (val == 0xdeadbeef)
break;
DRM_UDELAY(1);
}
if (tmp < dev_priv->usec_timeout) {
dev_priv->writeback_works = 1;
DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
} else {
dev_priv->writeback_works = 0;
DRM_INFO("writeback test failed\n");
}
if (radeon_no_wb == 1) {
dev_priv->writeback_works = 0;
DRM_INFO("writeback forced off\n");
}
if (!dev_priv->writeback_works) {
/* Disable writeback to avoid unnecessary bus master transfer */
RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) |
RADEON_RB_NO_UPDATE);
RADEON_WRITE(RADEON_SCRATCH_UMSK, 0);
}
}
/* Enable or disable IGP GART on the chip */
static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
{
u32 temp;
if (on) {
DRM_DEBUG("programming igp gart %08X %08lX %08X\n",
dev_priv->gart_vm_start,
(long)dev_priv->gart_info.bus_addr,
dev_priv->gart_size);
temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);
if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN |
RS690_BLOCK_GFX_D3_EN));
else
IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
RS480_VA_SIZE_32MB));
temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID);
IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN |
RS480_TLB_ENABLE |
RS480_GTW_LAC_EN |
RS480_1LEVEL_GART));
temp = dev_priv->gart_info.bus_addr & 0xfffff000;
temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4;
IGP_WRITE_MCIND(RS480_GART_BASE, temp);
temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL);
IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) |
RS480_REQ_TYPE_SNOOP_DIS));
radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start);
dev_priv->gart_size = 32*1024*1024;
temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) &
0xffff0000) | (dev_priv->gart_vm_start >> 16));
radeon_write_agp_location(dev_priv, temp);
temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE);
IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
RS480_VA_SIZE_32MB));
do {
temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
break;
DRM_UDELAY(1);
} while (1);
IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL,
RS480_GART_CACHE_INVALIDATE);
do {
temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
break;
DRM_UDELAY(1);
} while (1);
IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0);
} else {
IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
}
}
/* Enable or disable the RS600 IGP GART on the chip */
static void rs600_set_igpgart(drm_radeon_private_t *dev_priv, int on)
{
u32 temp;
int i;
if (on) {
DRM_DEBUG("programming igp gart %08X %08lX %08X\n",
dev_priv->gart_vm_start,
(long)dev_priv->gart_info.bus_addr,
dev_priv->gart_size);
IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, (RS600_EFFECTIVE_L2_CACHE_SIZE(6) |
RS600_EFFECTIVE_L2_QUEUE_SIZE(6)));
for (i = 0; i < 19; i++)
IGP_WRITE_MCIND(RS600_MC_PT0_CLIENT0_CNTL + i,
(RS600_ENABLE_TRANSLATION_MODE_OVERRIDE |
RS600_SYSTEM_ACCESS_MODE_IN_SYS |
RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH |
RS600_EFFECTIVE_L1_CACHE_SIZE(3) |
RS600_ENABLE_FRAGMENT_PROCESSING |
RS600_EFFECTIVE_L1_QUEUE_SIZE(3)));
IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_CNTL, (RS600_ENABLE_PAGE_TABLE |
RS600_PAGE_TABLE_TYPE_FLAT));
/* disable all other contexts */
for (i = 1; i < 8; i++)
IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_CNTL + i, 0);
/* setup the page table aperture */
IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
dev_priv->gart_info.bus_addr);
IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR,
dev_priv->gart_vm_start);
IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR,
(dev_priv->gart_vm_start + dev_priv->gart_size - 1));
IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
/* setup the system aperture */
IGP_WRITE_MCIND(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR,
dev_priv->gart_vm_start);
IGP_WRITE_MCIND(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR,
(dev_priv->gart_vm_start + dev_priv->gart_size - 1));
/* enable page tables */
temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, (temp | RS600_ENABLE_PT));
temp = IGP_READ_MCIND(dev_priv, RS600_MC_CNTL1);
IGP_WRITE_MCIND(RS600_MC_CNTL1, (temp | RS600_ENABLE_PAGE_TABLES));
/* invalidate the cache */
temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
temp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);
temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
temp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE;
IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);
temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
temp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);
temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
} else {
IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, 0);
temp = IGP_READ_MCIND(dev_priv, RS600_MC_CNTL1);
temp &= ~RS600_ENABLE_PAGE_TABLES;
IGP_WRITE_MCIND(RS600_MC_CNTL1, temp);
}
}
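/*
* Illustrative note (assumption: 4KB GART pages): the RS600 GART above
* is a single flat page table. Context 0 covers the range
* [gart_vm_start, gart_vm_start + gart_size), so a 32MB aperture would
* need 32MB / 4KB = 8192 table entries starting at gart_info.bus_addr.
*/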
static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
{
u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
if (on) {
DRM_DEBUG("programming pcie %08X %08lX %08X\n",
dev_priv->gart_vm_start,
(long)dev_priv->gart_info.bus_addr,
dev_priv->gart_size);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO,
dev_priv->gart_vm_start);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE,
dev_priv->gart_info.bus_addr);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO,
dev_priv->gart_vm_start);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO,
dev_priv->gart_vm_start +
dev_priv->gart_size - 1);
radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
RADEON_PCIE_TX_GART_EN);
} else {
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
tmp & ~RADEON_PCIE_TX_GART_EN);
}
}
/* Enable or disable PCI GART on the chip */
static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
{
u32 tmp;
if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740) ||
(dev_priv->flags & RADEON_IS_IGPGART)) {
radeon_set_igpgart(dev_priv, on);
return;
}
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) {
rs600_set_igpgart(dev_priv, on);
return;
}
if (dev_priv->flags & RADEON_IS_PCIE) {
radeon_set_pciegart(dev_priv, on);
return;
}
tmp = RADEON_READ(RADEON_AIC_CNTL);
if (on) {
RADEON_WRITE(RADEON_AIC_CNTL,
tmp | RADEON_PCIGART_TRANSLATE_EN);
/* set PCI GART page-table base address
*/
RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr);
/* set address range for PCI address translate
*/
RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start);
RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start
+ dev_priv->gart_size - 1);
/* Turn off AGP aperture -- is this required for PCI GART?
*/
radeon_write_agp_location(dev_priv, 0xffffffc0);
RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */
} else {
RADEON_WRITE(RADEON_AIC_CNTL,
tmp & ~RADEON_PCIGART_TRANSLATE_EN);
}
}
static int radeon_setup_pcigart_surface(drm_radeon_private_t *dev_priv)
{
struct drm_ati_pcigart_info *gart_info = &dev_priv->gart_info;
struct radeon_virt_surface *vp;
int i;
for (i = 0; i < RADEON_MAX_SURFACES * 2; i++) {
if (!dev_priv->virt_surfaces[i].file_priv ||
dev_priv->virt_surfaces[i].file_priv == PCIGART_FILE_PRIV)
break;
}
if (i >= 2 * RADEON_MAX_SURFACES)
return -ENOMEM;
vp = &dev_priv->virt_surfaces[i];
for (i = 0; i < RADEON_MAX_SURFACES; i++) {
struct radeon_surface *sp = &dev_priv->surfaces[i];
if (sp->refcount)
continue;
vp->surface_index = i;
vp->lower = gart_info->bus_addr;
vp->upper = vp->lower + gart_info->table_size;
vp->flags = 0;
vp->file_priv = PCIGART_FILE_PRIV;
sp->refcount = 1;
sp->lower = vp->lower;
sp->upper = vp->upper;
sp->flags = 0;
RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, sp->flags);
RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * i, sp->lower);
RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * i, sp->upper);
return 0;
}
return -ENOMEM;
}
static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
DRM_DEBUG("\n");
/* if we require the new memory map but don't have it, fail */
if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
DRM_DEBUG("Forcing AGP card to PCI mode\n");
dev_priv->flags &= ~RADEON_IS_AGP;
} else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
&& !init->is_pci) {
DRM_DEBUG("Restoring AGP flag\n");
dev_priv->flags |= RADEON_IS_AGP;
}
if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
DRM_ERROR("PCI GART memory not allocated!\n");
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
dev_priv->usec_timeout = init->usec_timeout;
if (dev_priv->usec_timeout < 1 ||
dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
DRM_DEBUG("TIMEOUT problem!\n");
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
/* Enable vblank on CRTC1 for older X servers
*/
dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
switch(init->func) {
case RADEON_INIT_R200_CP:
dev_priv->microcode_version = UCODE_R200;
break;
case RADEON_INIT_R300_CP:
dev_priv->microcode_version = UCODE_R300;
break;
default:
dev_priv->microcode_version = UCODE_R100;
}
dev_priv->do_boxes = 0;
dev_priv->cp_mode = init->cp_mode;
/* We don't support anything other than bus-mastering ring mode,
* but the ring can be in either AGP or PCI space for the ring
* read pointer.
*/
if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
(init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
switch (init->fb_bpp) {
case 16:
dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
break;
case 32:
default:
dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
break;
}
dev_priv->front_offset = init->front_offset;
dev_priv->front_pitch = init->front_pitch;
dev_priv->back_offset = init->back_offset;
dev_priv->back_pitch = init->back_pitch;
switch (init->depth_bpp) {
case 16:
dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
break;
case 32:
default:
dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
break;
}
dev_priv->depth_offset = init->depth_offset;
dev_priv->depth_pitch = init->depth_pitch;
/* Hardware state for depth clears. Remove this if/when we no
* longer clear the depth buffer with a 3D rectangle. Hard-code
* all values to prevent unwanted 3D state from slipping through
* and screwing with the clear operation.
*/
dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
(dev_priv->color_fmt << 10) |
(dev_priv->microcode_version ==
UCODE_R100 ? RADEON_ZBLOCK16 : 0));
dev_priv->depth_clear.rb3d_zstencilcntl =
(dev_priv->depth_fmt |
RADEON_Z_TEST_ALWAYS |
RADEON_STENCIL_TEST_ALWAYS |
RADEON_STENCIL_S_FAIL_REPLACE |
RADEON_STENCIL_ZPASS_REPLACE |
RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE);
dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
RADEON_BFACE_SOLID |
RADEON_FFACE_SOLID |
RADEON_FLAT_SHADE_VTX_LAST |
RADEON_DIFFUSE_SHADE_FLAT |
RADEON_ALPHA_SHADE_FLAT |
RADEON_SPECULAR_SHADE_FLAT |
RADEON_FOG_SHADE_FLAT |
RADEON_VTX_PIX_CENTER_OGL |
RADEON_ROUND_MODE_TRUNC |
RADEON_ROUND_PREC_8TH_PIX);
dev_priv->ring_offset = init->ring_offset;
dev_priv->ring_rptr_offset = init->ring_rptr_offset;
dev_priv->buffers_offset = init->buffers_offset;
dev_priv->gart_textures_offset = init->gart_textures_offset;
master_priv->sarea = drm_getsarea(dev);
if (!master_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
if (!dev_priv->cp_ring) {
DRM_ERROR("could not find cp ring region!\n");
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
if (!dev_priv->ring_rptr) {
DRM_ERROR("could not find ring read pointer!\n");
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
if (!dev->agp_buffer_map) {
DRM_ERROR("could not find dma buffer region!\n");
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
if (init->gart_textures_offset) {
dev_priv->gart_textures =
drm_core_findmap(dev, init->gart_textures_offset);
if (!dev_priv->gart_textures) {
DRM_ERROR("could not find GART texture region!\n");
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
}
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
drm_core_ioremap_wc(dev_priv->cp_ring, dev);
drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
drm_core_ioremap_wc(dev->agp_buffer_map, dev);
if (!dev_priv->cp_ring->handle ||
!dev_priv->ring_rptr->handle ||
!dev->agp_buffer_map->handle) {
DRM_ERROR("could not find ioremap agp regions!\n");
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
} else
#endif
{
dev_priv->cp_ring->handle =
(void *)(unsigned long)dev_priv->cp_ring->offset;
dev_priv->ring_rptr->handle =
(void *)(unsigned long)dev_priv->ring_rptr->offset;
dev->agp_buffer_map->handle =
(void *)(unsigned long)dev->agp_buffer_map->offset;
DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
dev_priv->cp_ring->handle);
DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
dev_priv->ring_rptr->handle);
DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
dev->agp_buffer_map->handle);
}
dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
dev_priv->fb_size =
((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
- dev_priv->fb_location;
dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
((dev_priv->front_offset
+ dev_priv->fb_location) >> 10));
dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
((dev_priv->back_offset
+ dev_priv->fb_location) >> 10));
dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
((dev_priv->depth_offset
+ dev_priv->fb_location) >> 10));
dev_priv->gart_size = init->gart_size;
/* Now let's set the memory map ... */
if (dev_priv->new_memmap) {
u32 base = 0;
DRM_INFO("Setting GART location based on new memory map\n");
/* If using AGP, try to locate the AGP aperture at the same
* location in the card and on the bus, though we have to
* align it down.
*/
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
base = dev->agp->base;
/* Check if valid */
if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
dev->agp->base);
base = 0;
}
}
#endif
/* If not or if AGP is at 0 (Macs), try to put it elsewhere */
if (base == 0) {
base = dev_priv->fb_location + dev_priv->fb_size;
if (base < dev_priv->fb_location ||
((base + dev_priv->gart_size) & 0xfffffffful) < base)
base = dev_priv->fb_location
- dev_priv->gart_size;
}
dev_priv->gart_vm_start = base & 0xffc00000u;
if (dev_priv->gart_vm_start != base)
DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
base, dev_priv->gart_vm_start);
} else {
DRM_INFO("Setting GART location based on old memory map\n");
dev_priv->gart_vm_start = dev_priv->fb_location +
RADEON_READ(RADEON_CONFIG_APER_SIZE);
}
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP)
dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
- dev->agp->base
+ dev_priv->gart_vm_start);
else
#endif
dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
- (unsigned long)dev->sg->virtual
+ dev_priv->gart_vm_start);
DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start);
DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n",
dev_priv->gart_buffers_offset);
dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+ init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size;
dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
/* Turn off PCI GART */
radeon_set_pcigart(dev_priv, 0);
} else
#endif
{
u32 sctrl;
int ret;
dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
/* if we have an offset set from userspace */
if (dev_priv->pcigart_offset_set) {
dev_priv->gart_info.bus_addr =
(resource_size_t)dev_priv->pcigart_offset + dev_priv->fb_location;
dev_priv->gart_info.mapping.offset =
dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
dev_priv->gart_info.mapping.size =
dev_priv->gart_info.table_size;
drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
dev_priv->gart_info.addr =
dev_priv->gart_info.mapping.handle;
if (dev_priv->flags & RADEON_IS_PCIE)
dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
else
dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
dev_priv->gart_info.gart_table_location =
DRM_ATI_GART_FB;
DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
dev_priv->gart_info.addr,
dev_priv->pcigart_offset);
} else {
if (dev_priv->flags & RADEON_IS_IGPGART)
dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
else
dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
dev_priv->gart_info.gart_table_location =
DRM_ATI_GART_MAIN;
dev_priv->gart_info.addr = NULL;
dev_priv->gart_info.bus_addr = 0;
if (dev_priv->flags & RADEON_IS_PCIE) {
DRM_ERROR
("Cannot use PCI Express without GART in FB memory\n");
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
}
sctrl = RADEON_READ(RADEON_SURFACE_CNTL);
RADEON_WRITE(RADEON_SURFACE_CNTL, 0);
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
ret = r600_page_table_init(dev);
else
ret = drm_ati_pcigart_init(dev, &dev_priv->gart_info);
RADEON_WRITE(RADEON_SURFACE_CNTL, sctrl);
if (!ret) {
DRM_ERROR("failed to init PCI GART!\n");
radeon_do_cleanup_cp(dev);
return -ENOMEM;
}
ret = radeon_setup_pcigart_surface(dev_priv);
if (ret) {
DRM_ERROR("failed to setup GART surface!\n");
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
r600_page_table_cleanup(dev, &dev_priv->gart_info);
else
drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info);
radeon_do_cleanup_cp(dev);
return ret;
}
/* Turn on PCI GART */
radeon_set_pcigart(dev_priv, 1);
}
if (!dev_priv->me_fw) {
int err = radeon_cp_init_microcode(dev_priv);
if (err) {
DRM_ERROR("Failed to load firmware!\n");
radeon_do_cleanup_cp(dev);
return err;
}
}
radeon_cp_load_microcode(dev_priv);
radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
dev_priv->last_buf = 0;
radeon_do_engine_reset(dev);
radeon_test_writeback(dev_priv);
return 0;
}
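/*
* Illustrative sketch (assumption: drm_order() returns the base-2 log):
* radeon_do_init_cp() encodes the ring geometry for CP_RB_CNTL as log2
* values -- quadwords (8 bytes) for the ring size and rptr updates,
* octwords (16 bytes) for the fetch size. For a 1MB ring with the
* hard-coded 4096-byte rptr updates and 32-byte fetches used above:
*/
#if 0 /* example only, never compiled */
static u32 example_rb_cntl_for_1mb_ring(void)
{
return (1 << 18) | /* fetch_size_l2ow  = log2(32 / 16)  = 1 */
(9 << 8) |         /* rptr_update_l2qw = log2(4096 / 8) = 9 */
17;                /* size_l2qw        = log2(1MB / 8)  = 17 */
}
#endif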
static int radeon_do_cleanup_cp(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
if (dev->irq_enabled)
drm_irq_uninstall(dev);
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
if (dev_priv->cp_ring != NULL) {
drm_core_ioremapfree(dev_priv->cp_ring, dev);
dev_priv->cp_ring = NULL;
}
if (dev_priv->ring_rptr != NULL) {
drm_core_ioremapfree(dev_priv->ring_rptr, dev);
dev_priv->ring_rptr = NULL;
}
if (dev->agp_buffer_map != NULL) {
drm_core_ioremapfree(dev->agp_buffer_map, dev);
dev->agp_buffer_map = NULL;
}
} else
#endif
{
if (dev_priv->gart_info.bus_addr) {
/* Turn off PCI GART */
radeon_set_pcigart(dev_priv, 0);
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
r600_page_table_cleanup(dev, &dev_priv->gart_info);
else {
if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
DRM_ERROR("failed to cleanup PCI GART!\n");
}
}
if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
{
drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
dev_priv->gart_info.addr = NULL;
}
}
/* only clear to the start of flags */
memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
return 0;
}
/* This code will reinit the Radeon CP hardware after a resume from disk.
* AFAIK, it would be very difficult to pickle the state at suspend time, so
* here we make sure that all Radeon hardware initialisation is re-done without
* affecting running applications.
*
* Charl P. Botha <http://cpbotha.net>
*/
static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
if (!dev_priv) {
DRM_ERROR("Called with no initialization\n");
return -EINVAL;
}
DRM_DEBUG("Starting radeon_do_resume_cp()\n");
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
/* Turn off PCI GART */
radeon_set_pcigart(dev_priv, 0);
} else
#endif
{
/* Turn on PCI GART */
radeon_set_pcigart(dev_priv, 1);
}
radeon_cp_load_microcode(dev_priv);
radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
dev_priv->have_z_offset = 0;
radeon_do_engine_reset(dev);
radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
DRM_DEBUG("radeon_do_resume_cp() complete\n");
return 0;
}
int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_init_t *init = data;
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (init->func == RADEON_INIT_R300_CP)
r300_init_reg_flags(dev);
switch (init->func) {
case RADEON_INIT_CP:
case RADEON_INIT_R200_CP:
case RADEON_INIT_R300_CP:
return radeon_do_init_cp(dev, init, file_priv);
case RADEON_INIT_R600_CP:
return r600_do_init_cp(dev, init, file_priv);
case RADEON_CLEANUP_CP:
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
return r600_do_cleanup_cp(dev);
else
return radeon_do_cleanup_cp(dev);
}
return -EINVAL;
}
int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (dev_priv->cp_running) {
DRM_DEBUG("while CP running\n");
return 0;
}
if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
DRM_DEBUG("called with bogus CP mode (%d)\n",
dev_priv->cp_mode);
return 0;
}
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
r600_do_cp_start(dev_priv);
else
radeon_do_cp_start(dev_priv);
return 0;
}
/* Stop the CP. The engine must have been idled before calling this
* routine.
*/
int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_cp_stop_t *stop = data;
int ret;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv->cp_running)
return 0;
/* Flush any pending CP commands. This ensures any outstanding
* commands are executed by the engine before we turn it off.
*/
if (stop->flush) {
radeon_do_cp_flush(dev_priv);
}
/* If we fail to make the engine go idle, we return an error
* code so that the DRM ioctl wrapper can try again.
*/
if (stop->idle) {
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
ret = r600_do_cp_idle(dev_priv);
else
ret = radeon_do_cp_idle(dev_priv);
if (ret)
return ret;
}
/* Finally, we can turn off the CP. If the engine isn't idle,
* we will get some dropped triangles as they won't be fully
* rendered before the CP is shut down.
*/
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
r600_do_cp_stop(dev_priv);
else
radeon_do_cp_stop(dev_priv);
/* Reset the engine */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
r600_do_engine_reset(dev);
else
radeon_do_engine_reset(dev);
return 0;
}
void radeon_do_release(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
int i, ret;
if (dev_priv) {
if (dev_priv->cp_running) {
/* Stop the cp */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
while ((ret = r600_do_cp_idle(dev_priv)) != 0) {
DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
#ifdef __linux__
schedule();
#else
tsleep(&ret, PZERO, "rdnrel", 1);
#endif
}
} else {
while ((ret = radeon_do_cp_idle(dev_priv)) != 0) {
DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
#ifdef __linux__
schedule();
#else
tsleep(&ret, PZERO, "rdnrel", 1);
#endif
}
}
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
r600_do_cp_stop(dev_priv);
r600_do_engine_reset(dev);
} else {
radeon_do_cp_stop(dev_priv);
radeon_do_engine_reset(dev);
}
}
if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_R600) {
/* Disable *all* interrupts */
if (dev_priv->mmio) /* remove this after permanent addmaps */
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
if (dev_priv->mmio) { /* remove all surfaces */
for (i = 0; i < RADEON_MAX_SURFACES; i++) {
RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0);
RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND +
16 * i, 0);
RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND +
16 * i, 0);
}
}
}
/* Free memory heap structures */
radeon_mem_takedown(&(dev_priv->gart_heap));
radeon_mem_takedown(&(dev_priv->fb_heap));
/* deallocate kernel resources */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
r600_do_cleanup_cp(dev);
else
radeon_do_cleanup_cp(dev);
if (dev_priv->me_fw) {
release_firmware(dev_priv->me_fw);
dev_priv->me_fw = NULL;
}
if (dev_priv->pfp_fw) {
release_firmware(dev_priv->pfp_fw);
dev_priv->pfp_fw = NULL;
}
}
}
/* Just reset the CP ring. Called as part of an X Server engine reset.
*/
int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_DEBUG("called before init done\n");
return -EINVAL;
}
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
r600_do_cp_reset(dev_priv);
else
radeon_do_cp_reset(dev_priv);
/* The CP is no longer running after an engine reset */
dev_priv->cp_running = 0;
return 0;
}
int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
return r600_do_cp_idle(dev_priv);
else
return radeon_do_cp_idle(dev_priv);
}
/* Added by Charl P. Botha to call radeon_do_resume_cp().
*/
int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
return r600_do_resume_cp(dev, file_priv);
else
return radeon_do_resume_cp(dev, file_priv);
}
int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
return r600_do_engine_reset(dev);
else
return radeon_do_engine_reset(dev);
}
/* ================================================================
* Fullscreen mode
*/
/* KW: Deprecated to say the least:
*/
int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
return 0;
}
/* ================================================================
* Freelist management
*/
/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through
* bufs until freelist code is used. Note this hides a problem with
* the scratch register (used to keep track of the last buffer
* completed) being written to before the last buffer has actually
* completed rendering.
*
* KW: It's also a good way to find free buffers quickly.
*
* KW: Ideally this loop wouldn't exist, and freelist_get wouldn't
* sleep. However, bugs in older versions of radeon_accel.c mean that
* we essentially have to do this, else old clients will break.
*
* However, it does leave open a potential deadlock where all the
* buffers are held by other clients, which can't release them because
* they can't get the lock.
*/
struct drm_buf *radeon_freelist_get(struct drm_device * dev)
{
struct drm_device_dma *dma = dev->dma;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_buf_priv_t *buf_priv;
struct drm_buf *buf;
int i, t;
int start;
if (++dev_priv->last_buf >= dma->buf_count)
dev_priv->last_buf = 0;
start = dev_priv->last_buf;
for (t = 0; t < dev_priv->usec_timeout; t++) {
u32 done_age = GET_SCRATCH(dev_priv, 1);
DRM_DEBUG("done_age = %d\n", done_age);
for (i = 0; i < dma->buf_count; i++) {
buf = dma->buflist[start];
buf_priv = buf->dev_private;
if (buf->file_priv == NULL || (buf->pending &&
buf_priv->age <=
done_age)) {
dev_priv->stats.requested_bufs++;
buf->pending = 0;
return buf;
}
if (++start >= dma->buf_count)
start = 0;
}
if (t) {
DRM_UDELAY(1);
dev_priv->stats.freelist_loops++;
}
}
return NULL;
}
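/*
* Illustrative note (example only): every submitted buffer is stamped
* with an age, and GET_SCRATCH(dev_priv, 1) returns the age of the last
* buffer the CP has retired. If done_age reads back as 42, any pending
* buffer stamped with age <= 42 can be reclaimed; buffers stamped 43 or
* higher are still in flight.
*/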
void radeon_freelist_reset(struct drm_device * dev)
{
struct drm_device_dma *dma = dev->dma;
drm_radeon_private_t *dev_priv = dev->dev_private;
int i;
dev_priv->last_buf = 0;
for (i = 0; i < dma->buf_count; i++) {
struct drm_buf *buf = dma->buflist[i];
drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
buf_priv->age = 0;
}
}
/* ================================================================
* CP command submission
*/
int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
{
drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
int i;
u32 last_head = GET_RING_HEAD(dev_priv);
for (i = 0; i < dev_priv->usec_timeout; i++) {
u32 head = GET_RING_HEAD(dev_priv);
ring->space = (head - ring->tail) * sizeof(u32);
if (ring->space <= 0)
ring->space += ring->size;
if (ring->space > n)
return 0;
dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
if (head != last_head)
i = 0;
last_head = head;
DRM_UDELAY(1);
}
/* FIXME: This return value is ignored in the BEGIN_RING macro! */
#if RADEON_FIFO_DEBUG
radeon_status(dev_priv);
DRM_ERROR("failed!\n");
#endif
return -EBUSY;
}
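/*
* Illustrative note (example only): the free-space computation in
* radeon_wait_ring() treats the ring as circular. With a 64-dword ring,
* head at dword 10 and tail at dword 50:
*
*   space = (10 - 50) * 4 = -160  ->  space += 64 * 4  ->  96 bytes
*
* i.e. 24 free dwords, from dword 50 up to (but not onto) the reader at
* dword 10.
*/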
static int radeon_cp_get_buffers(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_dma * d)
{
int i;
struct drm_buf *buf;
for (i = d->granted_count; i < d->request_count; i++) {
buf = radeon_freelist_get(dev);
if (!buf)
return -EBUSY; /* NOTE: broken client */
buf->file_priv = file_priv;
if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
sizeof(buf->idx)))
return -EFAULT;
if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
sizeof(buf->total)))
return -EFAULT;
d->granted_count++;
}
return 0;
}
int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int ret = 0;
struct drm_dma *d = data;
LOCK_TEST_WITH_RETURN(dev, file_priv);
/* Please don't send us buffers.
*/
if (d->send_count != 0) {
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
DRM_CURRENTPID, d->send_count);
return -EINVAL;
}
/* We'll send you buffers.
*/
if (d->request_count < 0 || d->request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
DRM_CURRENTPID, d->request_count, dma->buf_count);
return -EINVAL;
}
d->granted_count = 0;
if (d->request_count) {
ret = radeon_cp_get_buffers(dev, file_priv, d);
}
return ret;
}
int radeon_driver_load(struct drm_device *dev, unsigned long flags)
{
drm_radeon_private_t *dev_priv;
int ret = 0;
dev_priv = kzalloc(sizeof(drm_radeon_private_t), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
dev->dev_private = (void *)dev_priv;
dev_priv->flags = flags;
switch (flags & RADEON_FAMILY_MASK) {
case CHIP_R100:
case CHIP_RV200:
case CHIP_R200:
case CHIP_R300:
case CHIP_R350:
case CHIP_R420:
case CHIP_R423:
case CHIP_RV410:
case CHIP_RV515:
case CHIP_R520:
case CHIP_RV570:
case CHIP_R580:
dev_priv->flags |= RADEON_HAS_HIERZ;
break;
default:
/* all other chips have no hierarchical z buffer */
break;
}
pci_set_master(dev->pdev);
if (drm_pci_device_is_agp(dev))
dev_priv->flags |= RADEON_IS_AGP;
else if (pci_is_pcie(dev->pdev))
dev_priv->flags |= RADEON_IS_PCIE;
else
dev_priv->flags |= RADEON_IS_PCI;
ret = drm_addmap(dev, pci_resource_start(dev->pdev, 2),
pci_resource_len(dev->pdev, 2), _DRM_REGISTERS,
_DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio);
if (ret != 0)
return ret;
ret = drm_vblank_init(dev, 2);
if (ret) {
radeon_driver_unload(dev);
return ret;
}
DRM_DEBUG("%s card detected\n",
((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
return ret;
}
int radeon_master_create(struct drm_device *dev, struct drm_master *master)
{
struct drm_radeon_master_private *master_priv;
unsigned long sareapage;
int ret;
master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
if (!master_priv)
return -ENOMEM;
/* prebuild the SAREA */
sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK,
&master_priv->sarea);
if (ret) {
DRM_ERROR("SAREA setup failed\n");
kfree(master_priv);
return ret;
}
master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
master_priv->sarea_priv->pfCurrentPage = 0;
master->driver_priv = master_priv;
return 0;
}
void radeon_master_destroy(struct drm_device *dev, struct drm_master *master)
{
struct drm_radeon_master_private *master_priv = master->driver_priv;
if (!master_priv)
return;
if (master_priv->sarea_priv &&
master_priv->sarea_priv->pfCurrentPage != 0)
radeon_cp_dispatch_flip(dev, master);
master_priv->sarea_priv = NULL;
if (master_priv->sarea)
drm_rmmap_locked(dev, master_priv->sarea);
kfree(master_priv);
master->driver_priv = NULL;
}
/* Create mappings for registers and framebuffer so userland doesn't necessarily
* have to find them.
*/
int radeon_driver_firstopen(struct drm_device *dev)
{
int ret;
drm_local_map_t *map;
drm_radeon_private_t *dev_priv = dev->dev_private;
dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
dev_priv->fb_aper_offset = pci_resource_start(dev->pdev, 0);
ret = drm_addmap(dev, dev_priv->fb_aper_offset,
pci_resource_len(dev->pdev, 0), _DRM_FRAME_BUFFER,
_DRM_WRITE_COMBINING, &map);
if (ret != 0)
return ret;
return 0;
}
int radeon_driver_unload(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
drm_rmmap(dev, dev_priv->mmio);
kfree(dev_priv);
dev->dev_private = NULL;
return 0;
}
void radeon_commit_ring(drm_radeon_private_t *dev_priv)
{
int i;
u32 *ring;
int tail_aligned;
/* pad the ring tail out to 16-dword alignment with no-ops if needed */
tail_aligned = dev_priv->ring.tail & (RADEON_RING_ALIGN-1);
if (tail_aligned) {
int num_p2 = RADEON_RING_ALIGN - tail_aligned;
ring = dev_priv->ring.start;
/* pad with some CP_PACKET2 */
for (i = 0; i < num_p2; i++)
ring[dev_priv->ring.tail + i] = CP_PACKET2();
dev_priv->ring.tail += i;
dev_priv->ring.space -= num_p2 * sizeof(u32);
}
dev_priv->ring.tail &= dev_priv->ring.tail_mask;
DRM_MEMORYBARRIER();
GET_RING_HEAD( dev_priv );
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
RADEON_WRITE(R600_CP_RB_WPTR, dev_priv->ring.tail);
/* read from PCI bus to ensure correct posting */
RADEON_READ(R600_CP_RB_RPTR);
} else {
RADEON_WRITE(RADEON_CP_RB_WPTR, dev_priv->ring.tail);
/* read from PCI bus to ensure correct posting */
RADEON_READ(RADEON_CP_RB_RPTR);
}
}
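/*
* Illustrative note (example only): the padding above rounds the tail up
* to the next RADEON_RING_ALIGN (16-dword) boundary with CP_PACKET2
* no-ops. With tail = 0x23: tail & 15 = 3, so num_p2 = 16 - 3 = 13
* filler dwords bring the tail to 0x30 before the write pointer is
* handed to the hardware.
*/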
| gpl-2.0 |
rmbq/bubba_kernel_3.0 | drivers/input/serio/gscps2.c | 8322 | 11650 | /*
* drivers/input/serio/gscps2.c
*
* Copyright (c) 2004-2006 Helge Deller <deller@gmx.de>
* Copyright (c) 2002 Laurent Canet <canetl@esiee.fr>
* Copyright (c) 2002 Thibaut Varene <varenet@parisc-linux.org>
*
* Pieces of code based on linux-2.4's hp_mouse.c & hp_keyb.c
* Copyright (c) 1999 Alex deVries <alex@onefishtwo.ca>
* Copyright (c) 1999-2000 Philipp Rumpf <prumpf@tux.org>
* Copyright (c) 2000 Xavier Debacker <debackex@esiee.fr>
* Copyright (c) 2000-2001 Thomas Marteau <marteaut@esiee.fr>
*
* HP GSC PS/2 port driver, found in PA/RISC Workstations
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* TODO:
* - Dino testing (did HP ever ship a machine on which this port
* was usable/enabled?)
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/serio.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/pci_ids.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/parisc-device.h>
MODULE_AUTHOR("Laurent Canet <canetl@esiee.fr>, Thibaut Varene <varenet@parisc-linux.org>, Helge Deller <deller@gmx.de>");
MODULE_DESCRIPTION("HP GSC PS2 port driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(parisc, gscps2_device_tbl);
#define PFX "gscps2.c: "
/*
* Driver constants
*/
/* various constants */
#define ENABLE 1
#define DISABLE 0
#define GSC_DINO_OFFSET 0x0800 /* offset for DINO controller versus LASI one */
/* PS/2 IO port offsets */
#define GSC_ID 0x00 /* device ID offset (see: GSC_ID_XXX) */
#define GSC_RESET 0x00 /* reset port offset */
#define GSC_RCVDATA 0x04 /* receive port offset */
#define GSC_XMTDATA 0x04 /* transmit port offset */
#define GSC_CONTROL 0x08 /* see: Control register bits */
#define GSC_STATUS 0x0C /* see: Status register bits */
/* Control register bits */
#define GSC_CTRL_ENBL 0x01 /* enable interface */
#define GSC_CTRL_LPBXR 0x02 /* loopback operation */
#define GSC_CTRL_DIAG 0x20 /* directly control clock/data line */
#define GSC_CTRL_DATDIR 0x40 /* data line direct control */
#define GSC_CTRL_CLKDIR 0x80 /* clock line direct control */
/* Status register bits */
#define GSC_STAT_RBNE 0x01 /* Receive Buffer Not Empty */
#define GSC_STAT_TBNE 0x02 /* Transmit Buffer Not Empty */
#define GSC_STAT_TERR 0x04 /* Timeout Error */
#define GSC_STAT_PERR 0x08 /* Parity Error */
#define GSC_STAT_CMPINTR 0x10 /* Composite Interrupt = irq on any port */
#define GSC_STAT_DATSHD 0x40 /* Data Line Shadow */
#define GSC_STAT_CLKSHD 0x80 /* Clock Line Shadow */
/* IDs returned by GSC_ID port register */
#define GSC_ID_KEYBOARD 0 /* device ID values */
#define GSC_ID_MOUSE 1
static irqreturn_t gscps2_interrupt(int irq, void *dev);
#define BUFFER_SIZE 0x0f
/* GSC PS/2 port device struct */
struct gscps2port {
struct list_head node;
struct parisc_device *padev;
struct serio *port;
spinlock_t lock;
char *addr;
u8 act, append; /* position in buffer[] */
struct {
u8 data;
u8 str;
} buffer[BUFFER_SIZE+1];
int id;
};
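/* Note on the index arithmetic above: buffer[] has BUFFER_SIZE + 1 == 16
* slots, and both act and append advance with "(i + 1) & BUFFER_SIZE",
* e.g. append == 15 wraps to (15 + 1) & 0x0f == 0, giving a simple
* 16-entry ring buffer. */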
/*
* Various HW level routines
*/
#define gscps2_readb_input(x) readb((x)+GSC_RCVDATA)
#define gscps2_readb_control(x) readb((x)+GSC_CONTROL)
#define gscps2_readb_status(x) readb((x)+GSC_STATUS)
#define gscps2_writeb_control(x, y) writeb((x), (y)+GSC_CONTROL)
/*
* wait_TBE() - wait for Transmit Buffer Empty
*/
static int wait_TBE(char *addr)
{
int timeout = 25000; /* device is expected to react within 250 msec */
while (gscps2_readb_status(addr) & GSC_STAT_TBNE) {
if (!--timeout)
return 0; /* This should not happen */
udelay(10);
}
return 1;
}
/*
* gscps2_flush() - flush the receive buffer
*/
static void gscps2_flush(struct gscps2port *ps2port)
{
while (gscps2_readb_status(ps2port->addr) & GSC_STAT_RBNE)
gscps2_readb_input(ps2port->addr);
ps2port->act = ps2port->append = 0;
}
/*
* gscps2_writeb_output() - write a byte to the port
*
* returns 1 on success, 0 on error
*/
static inline int gscps2_writeb_output(struct gscps2port *ps2port, u8 data)
{
unsigned long flags;
char *addr = ps2port->addr;
if (!wait_TBE(addr)) {
printk(KERN_DEBUG PFX "timeout - could not write byte %#x\n", data);
return 0;
}
while (gscps2_readb_status(ps2port->addr) & GSC_STAT_RBNE)
/* wait */;
spin_lock_irqsave(&ps2port->lock, flags);
writeb(data, addr+GSC_XMTDATA);
spin_unlock_irqrestore(&ps2port->lock, flags);
/* this is ugly, but due to timing of the port it seems to be necessary. */
mdelay(6);
/* make sure any received data is returned as fast as possible */
/* this is important e.g. when we set the LEDs on the keyboard */
gscps2_interrupt(0, NULL);
return 1;
}
/*
* gscps2_enable() - enables or disables the port
*/
static void gscps2_enable(struct gscps2port *ps2port, int enable)
{
unsigned long flags;
u8 data;
/* now enable/disable the port */
spin_lock_irqsave(&ps2port->lock, flags);
gscps2_flush(ps2port);
data = gscps2_readb_control(ps2port->addr);
if (enable)
data |= GSC_CTRL_ENBL;
else
data &= ~GSC_CTRL_ENBL;
gscps2_writeb_control(data, ps2port->addr);
spin_unlock_irqrestore(&ps2port->lock, flags);
wait_TBE(ps2port->addr);
gscps2_flush(ps2port);
}
/*
* gscps2_reset() - resets the PS/2 port
*/
static void gscps2_reset(struct gscps2port *ps2port)
{
char *addr = ps2port->addr;
unsigned long flags;
/* reset the interface */
spin_lock_irqsave(&ps2port->lock, flags);
gscps2_flush(ps2port);
writeb(0xff, addr+GSC_RESET);
gscps2_flush(ps2port);
spin_unlock_irqrestore(&ps2port->lock, flags);
}
static LIST_HEAD(ps2port_list);
/**
* gscps2_interrupt() - Interrupt service routine
*
* This function reads received PS/2 bytes and processes them on
* all interfaces.
* The tricky part is that the keyboard and mouse PS/2 ports share the
* same interrupt, and no data can be sent while either of them holds
* input data. To work around this, we drain the incoming data as fast
* as possible and defer reporting it to the upper layer.
*/
static irqreturn_t gscps2_interrupt(int irq, void *dev)
{
struct gscps2port *ps2port;
list_for_each_entry(ps2port, &ps2port_list, node) {
unsigned long flags;
spin_lock_irqsave(&ps2port->lock, flags);
while ( (ps2port->buffer[ps2port->append].str =
gscps2_readb_status(ps2port->addr)) & GSC_STAT_RBNE ) {
ps2port->buffer[ps2port->append].data =
gscps2_readb_input(ps2port->addr);
ps2port->append = ((ps2port->append+1) & BUFFER_SIZE);
}
spin_unlock_irqrestore(&ps2port->lock, flags);
} /* list_for_each_entry */
/* all data was read from the ports - now report the data to upper layer */
list_for_each_entry(ps2port, &ps2port_list, node) {
while (ps2port->act != ps2port->append) {
unsigned int rxflags;
u8 data, status;
/* Did new data arrive while we were reading existing data?
If so, exit now and let the next irq invocation start over */
if (gscps2_readb_status(ps2port->addr) & GSC_STAT_CMPINTR)
return IRQ_HANDLED;
status = ps2port->buffer[ps2port->act].str;
data = ps2port->buffer[ps2port->act].data;
ps2port->act = ((ps2port->act+1) & BUFFER_SIZE);
rxflags = ((status & GSC_STAT_TERR) ? SERIO_TIMEOUT : 0 ) |
((status & GSC_STAT_PERR) ? SERIO_PARITY : 0 );
serio_interrupt(ps2port->port, data, rxflags);
} /* while() */
} /* list_for_each_entry */
return IRQ_HANDLED;
}
/*
* gscps2_write() - send a byte out through the aux interface.
*/
static int gscps2_write(struct serio *port, unsigned char data)
{
struct gscps2port *ps2port = port->port_data;
if (!gscps2_writeb_output(ps2port, data)) {
printk(KERN_DEBUG PFX "sending byte %#x failed.\n", data);
return -1;
}
return 0;
}
/*
* gscps2_open() is called when a port is opened by the higher layer.
* It resets and enables the port.
*/
static int gscps2_open(struct serio *port)
{
struct gscps2port *ps2port = port->port_data;
gscps2_reset(ps2port);
/* enable it */
gscps2_enable(ps2port, ENABLE);
gscps2_interrupt(0, NULL);
return 0;
}
/*
* gscps2_close() disables the port
*/
static void gscps2_close(struct serio *port)
{
struct gscps2port *ps2port = port->port_data;
gscps2_enable(ps2port, DISABLE);
}
/**
* gscps2_probe() - Probes PS/2 devices
*
* Returns 0 on success, or a negative error code on failure.
*/
static int __devinit gscps2_probe(struct parisc_device *dev)
{
struct gscps2port *ps2port;
struct serio *serio;
unsigned long hpa = dev->hpa.start;
int ret;
if (!dev->irq)
return -ENODEV;
/* Offset for DINO PS/2 (LASI ports need no offset) */
if (dev->id.sversion == 0x96)
hpa += GSC_DINO_OFFSET;
ps2port = kzalloc(sizeof(struct gscps2port), GFP_KERNEL);
serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (!ps2port || !serio) {
ret = -ENOMEM;
goto fail_nomem;
}
dev_set_drvdata(&dev->dev, ps2port);
ps2port->port = serio;
ps2port->padev = dev;
ps2port->addr = ioremap_nocache(hpa, GSC_STATUS + 4);
spin_lock_init(&ps2port->lock);
gscps2_reset(ps2port);
ps2port->id = readb(ps2port->addr + GSC_ID) & 0x0f;
snprintf(serio->name, sizeof(serio->name), "gsc-ps2-%s",
(ps2port->id == GSC_ID_KEYBOARD) ? "keyboard" : "mouse");
strlcpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
serio->id.type = SERIO_8042;
serio->write = gscps2_write;
serio->open = gscps2_open;
serio->close = gscps2_close;
serio->port_data = ps2port;
serio->dev.parent = &dev->dev;
ret = -EBUSY;
if (request_irq(dev->irq, gscps2_interrupt, IRQF_SHARED, ps2port->port->name, ps2port))
goto fail_miserably;
if (ps2port->id != GSC_ID_KEYBOARD && ps2port->id != GSC_ID_MOUSE) {
printk(KERN_WARNING PFX "Unsupported PS/2 port at 0x%08lx (id=%d) ignored\n",
hpa, ps2port->id);
ret = -ENODEV;
goto fail;
}
#if 0
if (!request_mem_region(hpa, GSC_STATUS + 4, ps2port->port.name))
goto fail;
#endif
printk(KERN_INFO "serio: %s port at 0x%p irq %d @ %s\n",
ps2port->port->name,
ps2port->addr,
ps2port->padev->irq,
ps2port->port->phys);
serio_register_port(ps2port->port);
list_add_tail(&ps2port->node, &ps2port_list);
return 0;
fail:
free_irq(dev->irq, ps2port);
fail_miserably:
iounmap(ps2port->addr);
release_mem_region(dev->hpa.start, GSC_STATUS + 4);
fail_nomem:
kfree(ps2port);
kfree(serio);
return ret;
}
/**
* gscps2_remove() - Removes PS/2 devices
*
* Returns 0 on success.
*/
static int __devexit gscps2_remove(struct parisc_device *dev)
{
struct gscps2port *ps2port = dev_get_drvdata(&dev->dev);
serio_unregister_port(ps2port->port);
free_irq(dev->irq, ps2port);
gscps2_flush(ps2port);
list_del(&ps2port->node);
iounmap(ps2port->addr);
#if 0
release_mem_region(dev->hpa, GSC_STATUS + 4);
#endif
dev_set_drvdata(&dev->dev, NULL);
kfree(ps2port);
return 0;
}
static struct parisc_device_id gscps2_device_tbl[] = {
{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00084 }, /* LASI PS/2 */
#ifdef DINO_TESTED
{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00096 }, /* DINO PS/2 */
#endif
{ 0, } /* 0 terminated list */
};
static struct parisc_driver parisc_ps2_driver = {
.name = "gsc_ps2",
.id_table = gscps2_device_tbl,
.probe = gscps2_probe,
.remove = __devexit_p(gscps2_remove),
};
static int __init gscps2_init(void)
{
register_parisc_driver(&parisc_ps2_driver);
return 0;
}
static void __exit gscps2_exit(void)
{
unregister_parisc_driver(&parisc_ps2_driver);
}
module_init(gscps2_init);
module_exit(gscps2_exit);
| gpl-2.0 |
carz2/Sapphire-2.6.29-sense | drivers/i2c/algos/i2c-algo-bit.c | 131 | 16314 | /* -------------------------------------------------------------------------
* i2c-algo-bit.c i2c driver algorithms for bit-shift adapters
* -------------------------------------------------------------------------
* Copyright (C) 1995-2000 Simon G. Vogl
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
* ------------------------------------------------------------------------- */
/* With some changes from Frodo Looijaard <frodol@dds.nl>, Kyösti Mälkki
<kmalkki@cc.hut.fi> and Jean Delvare <khali@linux-fr.org> */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
/* ----- global defines ----------------------------------------------- */
#ifdef DEBUG
#define bit_dbg(level, dev, format, args...) \
do { \
if (i2c_debug >= level) \
dev_dbg(dev, format, ##args); \
} while (0)
#else
#define bit_dbg(level, dev, format, args...) \
do {} while (0)
#endif /* DEBUG */
/* ----- global variables --------------------------------------------- */
static int bit_test; /* see if the line-setting functions work */
module_param(bit_test, bool, 0);
MODULE_PARM_DESC(bit_test, "Test the lines of the bus to see if it is stuck");
#ifdef DEBUG
static int i2c_debug = 1;
module_param(i2c_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(i2c_debug,
"debug level - 0 off; 1 normal; 2 verbose; 3 very verbose");
#endif
/* --- setting states on the bus with the right timing: --------------- */
#define setsda(adap, val) adap->setsda(adap->data, val)
#define setscl(adap, val) adap->setscl(adap->data, val)
#define getsda(adap) adap->getsda(adap->data)
#define getscl(adap) adap->getscl(adap->data)
static inline void sdalo(struct i2c_algo_bit_data *adap)
{
setsda(adap, 0);
udelay((adap->udelay + 1) / 2);
}
static inline void sdahi(struct i2c_algo_bit_data *adap)
{
setsda(adap, 1);
udelay((adap->udelay + 1) / 2);
}
static inline void scllo(struct i2c_algo_bit_data *adap)
{
setscl(adap, 0);
udelay(adap->udelay / 2);
}
/*
* Raise the scl line and wait for it to actually go high. This is
* necessary for slower devices, which may stretch the clock.
*/
static int sclhi(struct i2c_algo_bit_data *adap)
{
unsigned long start;
setscl(adap, 1);
/* Not all adapters have scl sense line... */
if (!adap->getscl)
goto done;
start = jiffies;
while (!getscl(adap)) {
/* This hw knows how to read the clock line, so we wait
* until it actually gets high. This is safer as some
* chips may hold it low ("clock stretching") while they
* are processing data internally.
*/
if (time_after_eq(jiffies, start + adap->timeout))
return -ETIMEDOUT;
cond_resched();
}
#ifdef DEBUG
if (jiffies != start && i2c_debug >= 3)
pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go "
"high\n", jiffies - start);
#endif
done:
udelay(adap->udelay);
return 0;
}
/* --- other auxiliary functions -------------------------------------- */
static void i2c_start(struct i2c_algo_bit_data *adap)
{
/* assert: scl, sda are high */
setsda(adap, 0);
udelay(adap->udelay);
scllo(adap);
}
static void i2c_repstart(struct i2c_algo_bit_data *adap)
{
/* assert: scl is low */
sdahi(adap);
sclhi(adap);
setsda(adap, 0);
udelay(adap->udelay);
scllo(adap);
}
static void i2c_stop(struct i2c_algo_bit_data *adap)
{
/* assert: scl is low */
sdalo(adap);
sclhi(adap);
setsda(adap, 1);
udelay(adap->udelay);
}
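/* Bus-condition summary for the three helpers above (a sketch; both
* lines idle high and are only ever driven low):
*
* START: SDA falls while SCL is high, then SCL falls
* REPSTART: release SDA, raise SCL, then SDA falls as in START
* STOP: pull SDA low, raise SCL, then SDA rises while SCL is high
*/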
/* send a byte without a start condition, watch for arbitration,
and check the acknowledge bit from the slave */
/* returns:
* 1 if the device acknowledged
* 0 if the device did not ack
* -ETIMEDOUT if an error occurred (while raising the scl line)
*/
static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c)
{
int i;
int sb;
int ack;
struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
/* assert: scl is low */
for (i = 7; i >= 0; i--) {
sb = (c >> i) & 1;
setsda(adap, sb);
udelay((adap->udelay + 1) / 2);
if (sclhi(adap) < 0) { /* timed out */
bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
"timeout at bit #%d\n", (int)c, i);
return -ETIMEDOUT;
}
/* FIXME do arbitration here:
* if (sb && !getsda(adap)) -> ouch! Get out of here.
*
* Report a unique code, so higher level code can retry
* the whole (combined) message and *NOT* issue STOP.
*/
scllo(adap);
}
sdahi(adap);
if (sclhi(adap) < 0) { /* timeout */
bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
"timeout at ack\n", (int)c);
return -ETIMEDOUT;
}
/* read ack: SDA should be pulled down by slave, or it may
* NAK (usually to report problems with the data we wrote).
*/
ack = !getsda(adap); /* ack: sda is pulled low -> success */
bit_dbg(2, &i2c_adap->dev, "i2c_outb: 0x%02x %s\n", (int)c,
ack ? "A" : "NA");
scllo(adap);
return ack;
/* assert: scl is low (sda undef) */
}
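/* Example: c == 0xa5 is shifted out MSB-first as 1,0,1,0,0,1,0,1;
* sdahi() then releases SDA so the slave can pull it low for the ACK. */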
static int i2c_inb(struct i2c_adapter *i2c_adap)
{
/* read byte via i2c port, without start/stop sequence */
/* acknowledge is sent in i2c_read. */
int i;
unsigned char indata = 0;
struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
/* assert: scl is low */
sdahi(adap);
for (i = 0; i < 8; i++) {
if (sclhi(adap) < 0) { /* timeout */
bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit "
"#%d\n", 7 - i);
return -ETIMEDOUT;
}
indata *= 2;
if (getsda(adap))
indata |= 0x01;
setscl(adap, 0);
udelay(i == 7 ? adap->udelay / 2 : adap->udelay);
}
/* assert: scl is low */
return indata;
}
/*
* Sanity check for the adapter hardware - check the reaction of
* the bus lines only if it seems to be idle.
*/
static int test_bus(struct i2c_algo_bit_data *adap, char *name)
{
int scl, sda;
if (adap->getscl == NULL)
pr_info("%s: Testing SDA only, SCL is not readable\n", name);
sda = getsda(adap);
scl = (adap->getscl == NULL) ? 1 : getscl(adap);
if (!scl || !sda) {
printk(KERN_WARNING "%s: bus seems to be busy\n", name);
goto bailout;
}
sdalo(adap);
sda = getsda(adap);
scl = (adap->getscl == NULL) ? 1 : getscl(adap);
if (sda) {
printk(KERN_WARNING "%s: SDA stuck high!\n", name);
goto bailout;
}
if (!scl) {
printk(KERN_WARNING "%s: SCL unexpected low "
"while pulling SDA low!\n", name);
goto bailout;
}
sdahi(adap);
sda = getsda(adap);
scl = (adap->getscl == NULL) ? 1 : getscl(adap);
if (!sda) {
printk(KERN_WARNING "%s: SDA stuck low!\n", name);
goto bailout;
}
if (!scl) {
printk(KERN_WARNING "%s: SCL unexpected low "
"while pulling SDA high!\n", name);
goto bailout;
}
scllo(adap);
sda = getsda(adap);
scl = (adap->getscl == NULL) ? 0 : getscl(adap);
if (scl) {
printk(KERN_WARNING "%s: SCL stuck high!\n", name);
goto bailout;
}
if (!sda) {
printk(KERN_WARNING "%s: SDA unexpected low "
"while pulling SCL low!\n", name);
goto bailout;
}
sclhi(adap);
sda = getsda(adap);
scl = (adap->getscl == NULL) ? 1 : getscl(adap);
if (!scl) {
printk(KERN_WARNING "%s: SCL stuck low!\n", name);
goto bailout;
}
if (!sda) {
printk(KERN_WARNING "%s: SDA unexpected low "
"while pulling SCL high!\n", name);
goto bailout;
}
pr_info("%s: Test OK\n", name);
return 0;
bailout:
sdahi(adap);
sclhi(adap);
return -ENODEV;
}
/* ----- Utility functions
*/
/* try_address tries to contact a chip a number of times
* before it gives up.
* return values:
* 1 chip answered
* 0 chip did not answer
* -x transmission error
*/
static int try_address(struct i2c_adapter *i2c_adap,
unsigned char addr, int retries)
{
struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
int i, ret = 0;
for (i = 0; i <= retries; i++) {
ret = i2c_outb(i2c_adap, addr);
if (ret == 1 || i == retries)
break;
bit_dbg(3, &i2c_adap->dev, "emitting stop condition\n");
i2c_stop(adap);
udelay(adap->udelay);
yield();
bit_dbg(3, &i2c_adap->dev, "emitting start condition\n");
i2c_start(adap);
}
if (i && ret)
bit_dbg(1, &i2c_adap->dev, "Used %d tries to %s client at "
"0x%02x: %s\n", i + 1,
addr & 1 ? "read from" : "write to", addr >> 1,
ret == 1 ? "success" : "failed, timeout?");
return ret;
}
static int sendbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
{
const unsigned char *temp = msg->buf;
int count = msg->len;
unsigned short nak_ok = msg->flags & I2C_M_IGNORE_NAK;
int retval;
int wrcount = 0;
while (count > 0) {
retval = i2c_outb(i2c_adap, *temp);
/* OK/ACK; or ignored NAK */
if ((retval > 0) || (nak_ok && (retval == 0))) {
count--;
temp++;
wrcount++;
/* A slave NAKing the master means the slave didn't like
* something about the data it saw. For example, maybe
* the SMBus PEC was wrong.
*/
} else if (retval == 0) {
dev_err(&i2c_adap->dev, "sendbytes: NAK bailout.\n");
return -EIO;
/* Timeout; or (someday) lost arbitration
*
* FIXME Lost ARB implies retrying the transaction from
* the first message, after the "winning" master issues
* its STOP. As a rule, upper layer code has no reason
* to know or care about this ... it is *NOT* an error.
*/
} else {
dev_err(&i2c_adap->dev, "sendbytes: error %d\n",
retval);
return retval;
}
}
return wrcount;
}
static int acknak(struct i2c_adapter *i2c_adap, int is_ack)
{
struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
/* assert: sda is high */
if (is_ack) /* send ack */
setsda(adap, 0);
udelay((adap->udelay + 1) / 2);
if (sclhi(adap) < 0) { /* timeout */
dev_err(&i2c_adap->dev, "readbytes: ack/nak timeout\n");
return -ETIMEDOUT;
}
scllo(adap);
return 0;
}
static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
{
int inval;
int rdcount = 0; /* counts bytes read */
unsigned char *temp = msg->buf;
int count = msg->len;
const unsigned flags = msg->flags;
while (count > 0) {
inval = i2c_inb(i2c_adap);
if (inval >= 0) {
*temp = inval;
rdcount++;
} else { /* read timed out */
break;
}
temp++;
count--;
/* Some SMBus transactions require that we receive the
transaction length as the first read byte. */
if (rdcount == 1 && (flags & I2C_M_RECV_LEN)) {
if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) {
if (!(flags & I2C_M_NO_RD_ACK))
acknak(i2c_adap, 0);
dev_err(&i2c_adap->dev, "readbytes: invalid "
"block length (%d)\n", inval);
return -EREMOTEIO;
}
/* The original count value accounts for the extra
bytes, that is, either 1 for a regular transaction,
or 2 for a PEC transaction. */
count += inval;
msg->len += inval;
}
bit_dbg(2, &i2c_adap->dev, "readbytes: 0x%02x %s\n",
inval,
(flags & I2C_M_NO_RD_ACK)
? "(no ack/nak)"
: (count ? "A" : "NA"));
if (!(flags & I2C_M_NO_RD_ACK)) {
inval = acknak(i2c_adap, count);
if (inval < 0)
return inval;
}
}
return rdcount;
}
/* doAddress initiates the transfer by generating the start condition (in
* try_address) and transmits the address in the necessary format to handle
* reads, writes as well as 10bit-addresses.
* returns:
* 0 everything went okay, the chip ack'ed, or IGNORE_NAK flag was set
* -x an error occurred (like: -EREMOTEIO if the device did not answer, or
* -ETIMEDOUT, for example if the lines are stuck...)
*/
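/* Worked examples (hypothetical targets):
* - 7-bit read from chip 0x50: addr = (0x50 << 1) | 1 = 0xa1
* - 10-bit chip 0x123: extended code 0xf0 | ((0x123 >> 7) & 0x06) = 0xf2,
* then low byte 0x123 & 0xff = 0x23; a read re-sends 0xf2 | 1 = 0xf3
* after a repeated start.
*/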
static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
{
unsigned short flags = msg->flags;
unsigned short nak_ok = msg->flags & I2C_M_IGNORE_NAK;
struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
unsigned char addr;
int ret, retries;
retries = nak_ok ? 0 : i2c_adap->retries;
if (flags & I2C_M_TEN) {
/* a ten bit address */
addr = 0xf0 | ((msg->addr >> 7) & 0x06); /* 11110 a9 a8 rw */
bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr);
/* try extended address code...*/
ret = try_address(i2c_adap, addr, retries);
if ((ret != 1) && !nak_ok) {
dev_err(&i2c_adap->dev,
"died at extended address code\n");
return -EREMOTEIO;
}
/* the remaining 8 bits of the address */
ret = i2c_outb(i2c_adap, msg->addr & 0xff);
if ((ret != 1) && !nak_ok) {
/* the chip did not ack / xmission error occurred */
dev_err(&i2c_adap->dev, "died at 2nd address code\n");
return -EREMOTEIO;
}
if (flags & I2C_M_RD) {
bit_dbg(3, &i2c_adap->dev, "emitting repeated "
"start condition\n");
i2c_repstart(adap);
/* okay, now switch into reading mode */
addr |= 0x01;
ret = try_address(i2c_adap, addr, retries);
if ((ret != 1) && !nak_ok) {
dev_err(&i2c_adap->dev,
"died at repeated address code\n");
return -EREMOTEIO;
}
}
} else { /* normal 7bit address */
addr = msg->addr << 1;
if (flags & I2C_M_RD)
addr |= 1;
if (flags & I2C_M_REV_DIR_ADDR)
addr ^= 1;
ret = try_address(i2c_adap, addr, retries);
if ((ret != 1) && !nak_ok)
return -ENXIO;
}
return 0;
}
static int bit_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg msgs[], int num)
{
struct i2c_msg *pmsg;
struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
int i, ret;
unsigned short nak_ok;
bit_dbg(3, &i2c_adap->dev, "emitting start condition\n");
i2c_start(adap);
for (i = 0; i < num; i++) {
pmsg = &msgs[i];
nak_ok = pmsg->flags & I2C_M_IGNORE_NAK;
if (!(pmsg->flags & I2C_M_NOSTART)) {
if (i) {
bit_dbg(3, &i2c_adap->dev, "emitting "
"repeated start condition\n");
i2c_repstart(adap);
}
ret = bit_doAddress(i2c_adap, pmsg);
if ((ret != 0) && !nak_ok) {
bit_dbg(1, &i2c_adap->dev, "NAK from "
"device addr 0x%02x msg #%d\n",
msgs[i].addr, i);
goto bailout;
}
}
if (pmsg->flags & I2C_M_RD) {
/* read bytes into buffer*/
ret = readbytes(i2c_adap, pmsg);
if (ret >= 1)
bit_dbg(2, &i2c_adap->dev, "read %d byte%s\n",
ret, ret == 1 ? "" : "s");
if (ret < pmsg->len) {
if (ret >= 0)
ret = -EREMOTEIO;
goto bailout;
}
} else {
/* write bytes from buffer */
ret = sendbytes(i2c_adap, pmsg);
if (ret >= 1)
bit_dbg(2, &i2c_adap->dev, "wrote %d byte%s\n",
ret, ret == 1 ? "" : "s");
if (ret < pmsg->len) {
if (ret >= 0)
ret = -EREMOTEIO;
goto bailout;
}
}
}
ret = i;
bailout:
bit_dbg(3, &i2c_adap->dev, "emitting stop condition\n");
i2c_stop(adap);
return ret;
}
static u32 bit_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING;
}
/* -----exported algorithm data: ------------------------------------- */
static const struct i2c_algorithm i2c_bit_algo = {
.master_xfer = bit_xfer,
.functionality = bit_func,
};
/*
* registering functions to load algorithms at runtime
*/
static int i2c_bit_prepare_bus(struct i2c_adapter *adap)
{
struct i2c_algo_bit_data *bit_adap = adap->algo_data;
if (bit_test) {
int ret = test_bus(bit_adap, adap->name);
if (ret < 0)
return -ENODEV;
}
/* register new adapter to i2c module... */
adap->algo = &i2c_bit_algo;
adap->timeout = 100; /* default values, should */
adap->retries = 3; /* be replaced by defines */
return 0;
}
int i2c_bit_add_bus(struct i2c_adapter *adap)
{
int err;
err = i2c_bit_prepare_bus(adap);
if (err)
return err;
return i2c_add_adapter(adap);
}
EXPORT_SYMBOL(i2c_bit_add_bus);
int i2c_bit_add_numbered_bus(struct i2c_adapter *adap)
{
int err;
err = i2c_bit_prepare_bus(adap);
if (err)
return err;
return i2c_add_numbered_adapter(adap);
}
EXPORT_SYMBOL(i2c_bit_add_numbered_bus);
MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
MODULE_DESCRIPTION("I2C-Bus bit-banging algorithm");
MODULE_LICENSE("GPL");
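/*
* Usage sketch (not part of this file, hence the #if 0): a minimal board
* driver wiring this algorithm to two GPIO lines. The gpio_* calls are
* the generic GPIO API; the pin numbers, names and the open-drain
* emulation are illustrative assumptions. Only the i2c_algo_bit_data /
* i2c_bit_add_bus interface comes from this file.
*/
#if 0
#include <linux/gpio.h>

#define SDA_GPIO 23 /* hypothetical pins */
#define SCL_GPIO 24

/* emulate open-drain: drive low, or float high by switching to input */
static void board_setsda(void *data, int state)
{
if (state)
gpio_direction_input(SDA_GPIO);
else
gpio_direction_output(SDA_GPIO, 0);
}

static void board_setscl(void *data, int state)
{
if (state)
gpio_direction_input(SCL_GPIO);
else
gpio_direction_output(SCL_GPIO, 0);
}

static int board_getsda(void *data)
{
return gpio_get_value(SDA_GPIO);
}

static int board_getscl(void *data)
{
return gpio_get_value(SCL_GPIO);
}

static struct i2c_algo_bit_data board_bit_data = {
.setsda = board_setsda,
.setscl = board_setscl,
.getsda = board_getsda,
.getscl = board_getscl,
.udelay = 5, /* half-bit delay in usecs, ~100 kHz */
.timeout = HZ / 10, /* clock-stretch limit in jiffies */
};

static struct i2c_adapter board_adapter = {
.owner = THIS_MODULE,
.name = "board-gpio-i2c",
.algo_data = &board_bit_data,
};

static int __init board_i2c_init(void)
{
return i2c_bit_add_bus(&board_adapter);
}
#endif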
| gpl-2.0 |
TrustZoneGenericDriver/linux | drivers/ata/sata_mv.c | 131 | 125510 | /*
* sata_mv.c - Marvell SATA support
*
* Copyright 2008-2009: Marvell Corporation, all rights reserved.
* Copyright 2005: EMC Corporation, all rights reserved.
* Copyright 2005 Red Hat, Inc. All rights reserved.
*
* Originally written by Brett Russ.
* Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
*
* Please ALWAYS copy linux-ide@vger.kernel.org on emails.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/*
* sata_mv TODO list:
*
* --> Develop a low-power-consumption strategy, and implement it.
*
* --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
*
* --> [Experiment, Marvell value added] Is it possible to use target
* mode to cross-connect two Linux boxes with Marvell cards? If so,
* creating LibATA target mode support would be very interesting.
*
* Target mode, for those without docs, is the ability to directly
* connect two SATA ports.
*/
/*
* 80x1-B2 errata PCI#11:
*
* Users of the 6041/6081 Rev.B2 chips (current is C0)
* should be careful to insert those cards only onto PCI-X bus #0,
* and only in device slots 0..7, not higher. The chips may not
* work correctly otherwise (note: this is a pretty rare condition).
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>
#define DRV_NAME "sata_mv"
#define DRV_VERSION "1.28"
/*
* module options
*/
#ifdef CONFIG_PCI
static int msi;
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif
static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
"IRQ coalescing I/O count threshold (0..255)");
static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
"IRQ coalescing time threshold in usecs");
enum {
/* BAR's are enumerated in terms of pci_resource_start() terms */
MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
MV_IO_BAR = 2, /* offset 0x18: IO space */
MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
COAL_CLOCKS_PER_USEC = 150, /* for calculating COAL_TIMEs */
MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */
MAX_COAL_IO_COUNT = 255, /* completed I/O count */
MV_PCI_REG_BASE = 0,
/*
* Per-chip ("all ports") interrupt coalescing feature.
* This is only for GEN_II / GEN_IIE hardware.
*
* Coalescing defers the interrupt until either the IO_THRESHOLD
* (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
*/
COAL_REG_BASE = 0x18000,
IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08),
ALL_PORTS_COAL_IRQ = (1 << 4), /* all ports irq event */
IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc),
IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),
/*
* Registers for the (unused here) transaction coalescing feature:
*/
TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88),
TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c),
SATAHC0_REG_BASE = 0x20000,
FLASH_CTL = 0x1046c,
GPIO_PORT_CTL = 0x104f0,
RESET_CFG = 0x180d8,
MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
MV_MAX_Q_DEPTH = 32,
MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
/* CRQB needs alignment on a 1KB boundary. Size == 1KB
* CRPB needs alignment on a 256B boundary. Size == 256B
* ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
*/
MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
MV_MAX_SG_CT = 256,
MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
MV_PORT_HC_SHIFT = 2,
MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */
/* Host Flags */
MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ |
ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,
MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN,
CRQB_FLAG_READ = (1 << 0),
CRQB_TAG_SHIFT = 1,
CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
CRQB_CMD_ADDR_SHIFT = 8,
CRQB_CMD_CS = (0x2 << 11),
CRQB_CMD_LAST = (1 << 15),
CRPB_FLAG_STATUS_SHIFT = 8,
CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
EPRD_FLAG_END_OF_TBL = (1 << 31),
/* PCI interface registers */
MV_PCI_COMMAND = 0xc00,
MV_PCI_COMMAND_MWRCOM = (1 << 4), /* PCI Master Write Combining */
MV_PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */
PCI_MAIN_CMD_STS = 0xd30,
STOP_PCI_MASTER = (1 << 2),
PCI_MASTER_EMPTY = (1 << 3),
GLOB_SFT_RST = (1 << 4),
MV_PCI_MODE = 0xd00,
MV_PCI_MODE_MASK = 0x30,
MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
MV_PCI_DISC_TIMER = 0xd04,
MV_PCI_MSI_TRIGGER = 0xc38,
MV_PCI_SERR_MASK = 0xc28,
MV_PCI_XBAR_TMOUT = 0x1d04,
MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
MV_PCI_ERR_ATTRIBUTE = 0x1d48,
MV_PCI_ERR_COMMAND = 0x1d50,
PCI_IRQ_CAUSE = 0x1d58,
PCI_IRQ_MASK = 0x1d5c,
PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
PCIE_IRQ_CAUSE = 0x1900,
PCIE_IRQ_MASK = 0x1910,
PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
PCI_HC_MAIN_IRQ_CAUSE = 0x1d60,
PCI_HC_MAIN_IRQ_MASK = 0x1d64,
SOC_HC_MAIN_IRQ_CAUSE = 0x20020,
SOC_HC_MAIN_IRQ_MASK = 0x20024,
ERR_IRQ = (1 << 0), /* shift by (2 * port #) */
DONE_IRQ = (1 << 1), /* shift by (2 * port #) */
HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
DONE_IRQ_0_3 = 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */
DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT), /* 4,5,6,7 */
PCI_ERR = (1 << 18),
TRAN_COAL_LO_DONE = (1 << 19), /* transaction coalescing */
TRAN_COAL_HI_DONE = (1 << 20), /* transaction coalescing */
PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */
PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */
ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */
GPIO_INT = (1 << 22),
SELF_INT = (1 << 23),
TWSI_INT = (1 << 24),
HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
/* SATAHC registers */
HC_CFG = 0x00,
HC_IRQ_CAUSE = 0x14,
DMA_IRQ = (1 << 0), /* shift by port # */
HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */
DEV_IRQ = (1 << 8), /* shift by port # */
/*
* Per-HC (Host-Controller) interrupt coalescing feature.
* This is present on all chip generations.
*
* Coalescing defers the interrupt until either the IO_THRESHOLD
* (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
*/
HC_IRQ_COAL_IO_THRESHOLD = 0x000c,
HC_IRQ_COAL_TIME_THRESHOLD = 0x0010,
SOC_LED_CTRL = 0x2c,
SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */
SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */
/* with dev activity LED */
/* Shadow block registers */
SHD_BLK = 0x100,
SHD_CTL_AST = 0x20, /* ofs from SHD_BLK */
/* SATA registers */
SATA_STATUS = 0x300, /* ctrl, err regs follow status */
SATA_ACTIVE = 0x350,
FIS_IRQ_CAUSE = 0x364,
FIS_IRQ_CAUSE_AN = (1 << 9), /* async notification */
LTMODE = 0x30c, /* requires read-after-write */
LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
PHY_MODE2 = 0x330,
PHY_MODE3 = 0x310,
PHY_MODE4 = 0x314, /* requires read-after-write */
PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */
PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */
PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */
PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */
SATA_IFCTL = 0x344,
SATA_TESTCTL = 0x348,
SATA_IFSTAT = 0x34c,
VENDOR_UNIQUE_FIS = 0x35c,
FISCFG = 0x360,
FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */
FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
PHY_MODE9_GEN2 = 0x398,
PHY_MODE9_GEN1 = 0x39c,
PHYCFG_OFS = 0x3a0, /* only in 65n devices */
MV5_PHY_MODE = 0x74,
MV5_LTMODE = 0x30,
MV5_PHY_CTL = 0x0C,
SATA_IFCFG = 0x050,
LP_PHY_CTL = 0x058,
MV_M2_PREAMP_MASK = 0x7e0,
/* Port registers */
EDMA_CFG = 0,
EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
EDMA_ERR_IRQ_CAUSE = 0x8,
EDMA_ERR_IRQ_MASK = 0xc,
EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
EDMA_ERR_DEV = (1 << 2), /* device error */
EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
EDMA_ERR_OVERRUN_5 = (1 << 5),
EDMA_ERR_UNDERRUN_5 = (1 << 6),
EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
EDMA_ERR_LNK_CTRL_RX_1 |
EDMA_ERR_LNK_CTRL_RX_3 |
EDMA_ERR_LNK_CTRL_TX,
EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
EDMA_ERR_PRD_PAR |
EDMA_ERR_DEV_DCON |
EDMA_ERR_DEV_CON |
EDMA_ERR_SERR |
EDMA_ERR_SELF_DIS |
EDMA_ERR_CRQB_PAR |
EDMA_ERR_CRPB_PAR |
EDMA_ERR_INTRL_PAR |
EDMA_ERR_IORDY |
EDMA_ERR_LNK_CTRL_RX_2 |
EDMA_ERR_LNK_DATA_RX |
EDMA_ERR_LNK_DATA_TX |
EDMA_ERR_TRANS_PROTO,
EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
EDMA_ERR_PRD_PAR |
EDMA_ERR_DEV_DCON |
EDMA_ERR_DEV_CON |
EDMA_ERR_OVERRUN_5 |
EDMA_ERR_UNDERRUN_5 |
EDMA_ERR_SELF_DIS_5 |
EDMA_ERR_CRQB_PAR |
EDMA_ERR_CRPB_PAR |
EDMA_ERR_INTRL_PAR |
EDMA_ERR_IORDY,
EDMA_REQ_Q_BASE_HI = 0x10,
EDMA_REQ_Q_IN_PTR = 0x14, /* also contains BASE_LO */
EDMA_REQ_Q_OUT_PTR = 0x18,
EDMA_REQ_Q_PTR_SHIFT = 5,
EDMA_RSP_Q_BASE_HI = 0x1c,
EDMA_RSP_Q_IN_PTR = 0x20,
EDMA_RSP_Q_OUT_PTR = 0x24, /* also contains BASE_LO */
EDMA_RSP_Q_PTR_SHIFT = 3,
EDMA_CMD = 0x28, /* EDMA command register */
EDMA_EN = (1 << 0), /* enable EDMA */
EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */
EDMA_STATUS = 0x30, /* EDMA engine status */
EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */
EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */
EDMA_IORDY_TMOUT = 0x34,
EDMA_ARB_CFG = 0x38,
EDMA_HALTCOND = 0x60, /* GenIIe halt conditions */
EDMA_UNKNOWN_RSVD = 0x6C, /* GenIIe unknown/reserved */
BMDMA_CMD = 0x224, /* bmdma command register */
BMDMA_STATUS = 0x228, /* bmdma status register */
BMDMA_PRD_LOW = 0x22c, /* bmdma PRD addr 31:0 */
BMDMA_PRD_HIGH = 0x230, /* bmdma PRD addr 63:32 */
/* Host private flags (hp_flags) */
MV_HP_FLAG_MSI = (1 << 0),
MV_HP_ERRATA_50XXB0 = (1 << 1),
MV_HP_ERRATA_50XXB2 = (1 << 2),
MV_HP_ERRATA_60X1B2 = (1 << 3),
MV_HP_ERRATA_60X1C0 = (1 << 4),
MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */
MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */
MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */
MV_HP_FIX_LP_PHY_CTL = (1 << 13), /* fix speed in LP_PHY_CTL ? */
/* Port private flags (pp_flags) */
MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */
MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */
MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4), /* ignore initial ATA_DRDY */
};
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
#define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
#define WINDOW_BASE(i) (0x20034 + ((i) << 4))
enum {
/* DMA boundary 0xffff is required by the s/g splitting
* we need on /length/ in mv_fill_sg().
*/
MV_DMA_BOUNDARY = 0xffffU,
/* mask of register bits containing lower 32 bits
* of EDMA request queue DMA address
*/
EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
/* ditto, for response queue */
EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};
enum chip_type {
chip_504x,
chip_508x,
chip_5080,
chip_604x,
chip_608x,
chip_6042,
chip_7042,
chip_soc,
};
/* Command ReQuest Block: 32B */
struct mv_crqb {
__le32 sg_addr;
__le32 sg_addr_hi;
__le16 ctrl_flags;
__le16 ata_cmd[11];
};
struct mv_crqb_iie {
__le32 addr;
__le32 addr_hi;
__le32 flags;
__le32 len;
__le32 ata_cmd[4];
};
/* Command ResPonse Block: 8B */
struct mv_crpb {
__le16 id;
__le16 flags;
__le32 tmstmp;
};
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
__le32 addr;
__le32 flags_size;
__le32 addr_hi;
__le32 reserved;
};
/*
* We keep a local cache of a few frequently accessed port
* registers here, to avoid having to read them (very slow)
* when switching between EDMA and non-EDMA modes.
*/
struct mv_cached_regs {
u32 fiscfg;
u32 ltmode;
u32 haltcond;
u32 unknown_rsvd;
};
struct mv_port_priv {
struct mv_crqb *crqb;
dma_addr_t crqb_dma;
struct mv_crpb *crpb;
dma_addr_t crpb_dma;
struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
unsigned int req_idx;
unsigned int resp_idx;
u32 pp_flags;
struct mv_cached_regs cached;
unsigned int delayed_eh_pmp_map;
};
struct mv_port_signal {
u32 amps;
u32 pre;
};
struct mv_host_priv {
u32 hp_flags;
unsigned int board_idx;
u32 main_irq_mask;
struct mv_port_signal signal[8];
const struct mv_hw_ops *ops;
int n_ports;
void __iomem *base;
void __iomem *main_irq_cause_addr;
void __iomem *main_irq_mask_addr;
u32 irq_cause_offset;
u32 irq_mask_offset;
u32 unmask_all_irqs;
/*
* Needed on some devices that require their clocks to be enabled.
* These are optional: if the platform device does not have any
* clocks, they won't be used. Also, if the underlying hardware
* does not support the common clock framework (CONFIG_HAVE_CLK=n),
* all the clock operations become no-ops (see clk.h).
*/
struct clk *clk;
struct clk **port_clks;
/*
* Some devices have a SATA PHY which can be enabled/disabled
* in order to save power. These are optional: if the platform
* devices does not have any phy, they won't be used.
*/
struct phy **port_phys;
/*
* These consistent DMA memory pools give us guaranteed
* alignment for hardware-accessed data structures,
* and less memory waste in accomplishing the alignment.
*/
struct dma_pool *crqb_pool;
struct dma_pool *crpb_pool;
struct dma_pool *sg_tbl_pool;
};
struct mv_hw_ops {
void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port);
void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int n_hc);
void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
void __iomem *mmio, unsigned int port);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
struct mv_port_priv *pp);
static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8 mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
* because we have to allow room for worst case splitting of
* PRDs for 64K boundaries in mv_fill_sg().
*/
#ifdef CONFIG_PCI
static struct scsi_host_template mv5_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = MV_MAX_SG_CT / 2,
.dma_boundary = MV_DMA_BOUNDARY,
};
#endif
static struct scsi_host_template mv6_sht = {
ATA_NCQ_SHT(DRV_NAME),
.can_queue = MV_MAX_Q_DEPTH - 1,
.sg_tablesize = MV_MAX_SG_CT / 2,
.dma_boundary = MV_DMA_BOUNDARY,
};
static struct ata_port_operations mv5_ops = {
.inherits = &ata_sff_port_ops,
.lost_interrupt = ATA_OP_NULL,
.qc_defer = mv_qc_defer,
.qc_prep = mv_qc_prep,
.qc_issue = mv_qc_issue,
.freeze = mv_eh_freeze,
.thaw = mv_eh_thaw,
.hardreset = mv_hardreset,
.scr_read = mv5_scr_read,
.scr_write = mv5_scr_write,
.port_start = mv_port_start,
.port_stop = mv_port_stop,
};
static struct ata_port_operations mv6_ops = {
.inherits = &ata_bmdma_port_ops,
.lost_interrupt = ATA_OP_NULL,
.qc_defer = mv_qc_defer,
.qc_prep = mv_qc_prep,
.qc_issue = mv_qc_issue,
.dev_config = mv6_dev_config,
.freeze = mv_eh_freeze,
.thaw = mv_eh_thaw,
.hardreset = mv_hardreset,
.softreset = mv_softreset,
.pmp_hardreset = mv_pmp_hardreset,
.pmp_softreset = mv_softreset,
.error_handler = mv_pmp_error_handler,
.scr_read = mv_scr_read,
.scr_write = mv_scr_write,
.sff_check_status = mv_sff_check_status,
.sff_irq_clear = mv_sff_irq_clear,
.check_atapi_dma = mv_check_atapi_dma,
.bmdma_setup = mv_bmdma_setup,
.bmdma_start = mv_bmdma_start,
.bmdma_stop = mv_bmdma_stop,
.bmdma_status = mv_bmdma_status,
.port_start = mv_port_start,
.port_stop = mv_port_stop,
};
static struct ata_port_operations mv_iie_ops = {
.inherits = &mv6_ops,
.dev_config = ATA_OP_NULL,
.qc_prep = mv_qc_prep_iie,
};
static const struct ata_port_info mv_port_info[] = {
{ /* chip_504x */
.flags = MV_GEN_I_FLAGS,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv5_ops,
},
{ /* chip_508x */
.flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv5_ops,
},
{ /* chip_5080 */
.flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv5_ops,
},
{ /* chip_604x */
.flags = MV_GEN_II_FLAGS,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv6_ops,
},
{ /* chip_608x */
.flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv6_ops,
},
{ /* chip_6042 */
.flags = MV_GEN_IIE_FLAGS,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv_iie_ops,
},
{ /* chip_7042 */
.flags = MV_GEN_IIE_FLAGS,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv_iie_ops,
},
{ /* chip_soc */
.flags = MV_GEN_IIE_FLAGS,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv_iie_ops,
},
};
static const struct pci_device_id mv_pci_tbl[] = {
{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
/* RocketRAID 1720/174x have different identifiers */
{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },
{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
/* Adaptec 1430SA */
{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
/* Marvell 7042 support */
{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
/* Highpoint RocketRAID PCIe series */
{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
{ } /* terminate list */
};
static const struct mv_hw_ops mv5xxx_ops = {
.phy_errata = mv5_phy_errata,
.enable_leds = mv5_enable_leds,
.read_preamp = mv5_read_preamp,
.reset_hc = mv5_reset_hc,
.reset_flash = mv5_reset_flash,
.reset_bus = mv5_reset_bus,
};
static const struct mv_hw_ops mv6xxx_ops = {
.phy_errata = mv6_phy_errata,
.enable_leds = mv6_enable_leds,
.read_preamp = mv6_read_preamp,
.reset_hc = mv6_reset_hc,
.reset_flash = mv6_reset_flash,
.reset_bus = mv_reset_pci_bus,
};
static const struct mv_hw_ops mv_soc_ops = {
.phy_errata = mv6_phy_errata,
.enable_leds = mv_soc_enable_leds,
.read_preamp = mv_soc_read_preamp,
.reset_hc = mv_soc_reset_hc,
.reset_flash = mv_soc_reset_flash,
.reset_bus = mv_soc_reset_bus,
};
static const struct mv_hw_ops mv_soc_65n_ops = {
.phy_errata = mv_soc_65n_phy_errata,
.enable_leds = mv_soc_enable_leds,
.reset_hc = mv_soc_reset_hc,
.reset_flash = mv_soc_reset_flash,
.reset_bus = mv_soc_reset_bus,
};
/*
* Functions
*/
static inline void writelfl(unsigned long data, void __iomem *addr)
{
writel(data, addr);
(void) readl(addr); /* flush to avoid PCI posted write */
}
static inline unsigned int mv_hc_from_port(unsigned int port)
{
return port >> MV_PORT_HC_SHIFT;
}
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
return port & MV_PORT_MASK;
}
/*
* Consolidate some rather tricky bit shift calculations.
* This is hot-path stuff, so not a function.
* Simple code, with two return values, so macro rather than inline.
*
* port is the sole input, in range 0..7.
* shift is one output, for use with main_irq_cause / main_irq_mask registers.
* hardport is the other output, in range 0..3.
*
* Note that port and hardport may be the same variable in some cases.
*/
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \
{ \
shift = mv_hc_from_port(port) * HC_SHIFT; \
hardport = mv_hardport_from_port(port); \
shift += hardport * 2; \
}
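/* Example: port 5 -> hc 1, hardport 1, so shift = 1 * HC_SHIFT + 1 * 2
* = 11; that port's ERR_IRQ and DONE_IRQ land at main-cause bits 11
* and 12 respectively. */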
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
unsigned int port)
{
return mv_hc_base(base, mv_hc_from_port(port));
}
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
return mv_hc_base_from_port(base, port) +
MV_SATAHC_ARBTR_REG_SZ +
(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
return hc_mmio + ofs;
}
static inline void __iomem *mv_host_base(struct ata_host *host)
{
struct mv_host_priv *hpriv = host->private_data;
return hpriv->base;
}
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
return mv_port_base(mv_host_base(ap->host), ap->port_no);
}
static inline int mv_get_hc_count(unsigned long port_flags)
{
return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
/**
* mv_save_cached_regs - (re-)initialize cached port registers
* @ap: the port whose registers we are caching
*
* Initialize the local cache of port registers,
* so that reading them over and over again can
* be avoided on the hotter paths of this driver.
* This saves a few microseconds each time we switch
* to/from EDMA mode to perform (eg.) a drive cache flush.
*/
static void mv_save_cached_regs(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_port_priv *pp = ap->private_data;
pp->cached.fiscfg = readl(port_mmio + FISCFG);
pp->cached.ltmode = readl(port_mmio + LTMODE);
pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
}
/**
* mv_write_cached_reg - write to a cached port register
* @addr: hardware address of the register
* @old: pointer to cached value of the register
* @new: new value for the register
*
* Write a new value to a cached register,
* but only if the value is different from before.
*/
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
if (new != *old) {
unsigned long laddr;
*old = new;
/*
* Workaround for 88SX60x1-B2 FEr SATA#13:
* Read-after-write is needed to prevent generating 64-bit
* write cycles on the PCI bus for SATA interface registers
* at offsets ending in 0x4 or 0xc.
*
* Looks like a lot of fuss, but it avoids an unnecessary
* +1 usec read-after-write delay for unaffected registers.
*/
laddr = (long)addr & 0xffff;
if (laddr >= 0x300 && laddr <= 0x33c) {
laddr &= 0x000f;
if (laddr == 0x4 || laddr == 0xc) {
writelfl(new, addr); /* read after write */
return;
}
}
writel(new, addr); /* unaffected by the errata */
}
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
struct mv_host_priv *hpriv,
struct mv_port_priv *pp)
{
u32 index;
/*
* initialize request queue
*/
pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
WARN_ON(pp->crqb_dma & 0x3ff);
writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
port_mmio + EDMA_REQ_Q_IN_PTR);
writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
/*
* initialize response queue
*/
pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
WARN_ON(pp->crpb_dma & 0xff);
writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
port_mmio + EDMA_RSP_Q_OUT_PTR);
}
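/* Example of the pointer encoding above: with req_idx == 3 the in-pointer
* register holds (crqb_dma & 0xfffffc00) | (3 << 5), i.e. the 1KB-aligned
* queue base in the upper bits and index 0x60 in the lower bits. */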
static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
/*
* When writing to the main_irq_mask in hardware,
* we must ensure exclusivity between the interrupt coalescing bits
* and the corresponding individual port DONE_IRQ bits.
*
* Note that this register is really an "IRQ enable" register,
* not an "IRQ mask" register as Marvell's naming might suggest.
*/
if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
mask &= ~DONE_IRQ_0_3;
if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
mask &= ~DONE_IRQ_4_7;
writelfl(mask, hpriv->main_irq_mask_addr);
}
static void mv_set_main_irq_mask(struct ata_host *host,
u32 disable_bits, u32 enable_bits)
{
struct mv_host_priv *hpriv = host->private_data;
u32 old_mask, new_mask;
old_mask = hpriv->main_irq_mask;
new_mask = (old_mask & ~disable_bits) | enable_bits;
if (new_mask != old_mask) {
hpriv->main_irq_mask = new_mask;
mv_write_main_irq_mask(new_mask, hpriv);
}
}
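/*
 * Worked example (illustrative values): with a cached mask of 0b1100,
 * disable_bits = 0b0100 and enable_bits = 0b0001, the new mask is
 * (0b1100 & ~0b0100) | 0b0001 = 0b1001, and the hardware register is
 * rewritten only because the value actually changed.
 */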
static void mv_enable_port_irqs(struct ata_port *ap,
unsigned int port_bits)
{
unsigned int shift, hardport, port = ap->port_no;
u32 disable_bits, enable_bits;
MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
enable_bits = port_bits << shift;
mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}
static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
void __iomem *port_mmio,
unsigned int port_irqs)
{
struct mv_host_priv *hpriv = ap->host->private_data;
int hardport = mv_hardport_from_port(ap->port_no);
void __iomem *hc_mmio = mv_hc_base_from_port(
mv_host_base(ap->host), ap->port_no);
u32 hc_irq_cause;
/* clear EDMA event indicators, if any */
writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
/* clear pending irq events */
hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
/* clear FIS IRQ Cause */
if (IS_GEN_IIE(hpriv))
writelfl(0, port_mmio + FIS_IRQ_CAUSE);
mv_enable_port_irqs(ap, port_irqs);
}
static void mv_set_irq_coalescing(struct ata_host *host,
unsigned int count, unsigned int usecs)
{
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base, *hc_mmio;
u32 coal_enable = 0;
unsigned long flags;
unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
ALL_PORTS_COAL_DONE;
/* Disable IRQ coalescing if either threshold is zero */
if (!usecs || !count) {
clks = count = 0;
} else {
/* Respect maximum limits of the hardware */
clks = usecs * COAL_CLOCKS_PER_USEC;
if (clks > MAX_COAL_TIME_THRESHOLD)
clks = MAX_COAL_TIME_THRESHOLD;
if (count > MAX_COAL_IO_COUNT)
count = MAX_COAL_IO_COUNT;
}
spin_lock_irqsave(&host->lock, flags);
mv_set_main_irq_mask(host, coal_disable, 0);
if (is_dual_hc && !IS_GEN_I(hpriv)) {
/*
* GEN_II/GEN_IIE with dual host controllers:
* one set of global thresholds for the entire chip.
*/
writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD);
writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
/* clear leftover coal IRQ bit */
writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
if (count)
coal_enable = ALL_PORTS_COAL_DONE;
clks = count = 0; /* force clearing of regular regs below */
}
/*
* All chips: independent thresholds for each HC on the chip.
*/
hc_mmio = mv_hc_base_from_port(mmio, 0);
writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
if (count)
coal_enable |= PORTS_0_3_COAL_DONE;
if (is_dual_hc) {
hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
if (count)
coal_enable |= PORTS_4_7_COAL_DONE;
}
mv_set_main_irq_mask(host, 0, coal_enable);
spin_unlock_irqrestore(&host->lock, flags);
}
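/*
 * Sketch only (hypothetical helper, not used by the driver): the same
 * time-threshold clamping as above, expressed with min_t().
 */
#if 0
static unsigned int mv_clamped_coal_clks(unsigned int usecs)
{
return min_t(unsigned int, usecs * COAL_CLOCKS_PER_USEC,
MAX_COAL_TIME_THRESHOLD);
}
#endif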
/**
* mv_start_edma - Enable eDMA engine
* @ap: port whose eDMA engine is to be enabled
* @port_mmio: port base address
* @pp: port private data
* @protocol: taskfile protocol of the command about to be issued
*
* Enable eDMA for the port, first stopping and reconfiguring it
* if the required NCQ mode differs from the mode currently in use.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
struct mv_port_priv *pp, u8 protocol)
{
int want_ncq = (protocol == ATA_PROT_NCQ);
if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
if (want_ncq != using_ncq)
mv_stop_edma(ap);
}
if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
struct mv_host_priv *hpriv = ap->host->private_data;
mv_edma_cfg(ap, want_ncq, 1);
mv_set_edma_ptrs(port_mmio, hpriv, pp);
mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
writelfl(EDMA_EN, port_mmio + EDMA_CMD);
pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
}
}
static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
const int per_loop = 5, timeout = (15 * 1000 / per_loop);
int i;
/*
* Wait for the EDMA engine to finish transactions in progress.
* No idea what a good "timeout" value might be, but measurements
* indicate that it often requires hundreds of microseconds
* with two drives in-use. So we use the 15msec value above
* as a rough guess at what even more drives might require.
*/
for (i = 0; i < timeout; ++i) {
u32 edma_stat = readl(port_mmio + EDMA_STATUS);
if ((edma_stat & empty_idle) == empty_idle)
break;
udelay(per_loop);
}
/* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
}
/**
* mv_stop_edma_engine - Disable eDMA engine
* @port_mmio: io base address
*
* LOCKING:
* Inherited from caller.
*/
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
int i;
/* Disable eDMA. The disable bit auto clears. */
writelfl(EDMA_DS, port_mmio + EDMA_CMD);
/* Wait for the chip to confirm eDMA is off. */
for (i = 10000; i > 0; i--) {
u32 reg = readl(port_mmio + EDMA_CMD);
if (!(reg & EDMA_EN))
return 0;
udelay(10);
}
return -EIO;
}
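/*
 * Worked numbers for the polling loop above: 10000 iterations of
 * udelay(10) give a worst-case wait of roughly 100 ms before -EIO is
 * returned; the disable normally completes within the first few
 * iterations.
 */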
static int mv_stop_edma(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_port_priv *pp = ap->private_data;
int err = 0;
if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
return 0;
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
mv_wait_for_edma_empty_idle(ap);
if (mv_stop_edma_engine(port_mmio)) {
ata_port_err(ap, "Unable to stop eDMA\n");
err = -EIO;
}
mv_edma_cfg(ap, 0, 0);
return err;
}
#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
int b, w;
for (b = 0; b < bytes; ) {
DPRINTK("%p: ", start + b);
for (w = 0; b < bytes && w < 4; w++) {
printk("%08x ", readl(start + b));
b += sizeof(u32);
}
printk("\n");
}
}
#endif
#if defined(ATA_DEBUG) || defined(CONFIG_PCI)
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
int b, w;
u32 dw;
for (b = 0; b < bytes; ) {
DPRINTK("%02x: ", b);
for (w = 0; b < bytes && w < 4; w++) {
(void) pci_read_config_dword(pdev, b, &dw);
printk("%08x ", dw);
b += sizeof(u32);
}
printk("\n");
}
#endif
}
#endif
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
void __iomem *hc_base = mv_hc_base(mmio_base,
port >> MV_PORT_HC_SHIFT);
void __iomem *port_base;
int start_port, num_ports, p, start_hc, num_hcs, hc;
if (port < 0) {
start_hc = start_port = 0;
num_ports = 8; /* should be benign for 4-port devs */
num_hcs = 2;
} else {
start_hc = port >> MV_PORT_HC_SHIFT;
start_port = port;
num_ports = num_hcs = 1;
}
DPRINTK("All registers for port(s) %u-%u:\n", start_port,
num_ports > 1 ? num_ports - 1 : start_port);
if (pdev) {
DPRINTK("PCI config space regs:\n");
mv_dump_pci_cfg(pdev, 0x68);
}
DPRINTK("PCI regs:\n");
mv_dump_mem(mmio_base+0xc00, 0x3c);
mv_dump_mem(mmio_base+0xd00, 0x34);
mv_dump_mem(mmio_base+0xf00, 0x4);
mv_dump_mem(mmio_base+0x1d00, 0x6c);
for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
hc_base = mv_hc_base(mmio_base, hc);
DPRINTK("HC regs (HC %i):\n", hc);
mv_dump_mem(hc_base, 0x1c);
}
for (p = start_port; p < start_port + num_ports; p++) {
port_base = mv_port_base(mmio_base, p);
DPRINTK("EDMA regs (port %i):\n", p);
mv_dump_mem(port_base, 0x54);
DPRINTK("SATA regs (port %i):\n", p);
mv_dump_mem(port_base+0x300, 0x60);
}
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
unsigned int ofs;
switch (sc_reg_in) {
case SCR_STATUS:
case SCR_CONTROL:
case SCR_ERROR:
ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
break;
case SCR_ACTIVE:
ofs = SATA_ACTIVE; /* active is not with the others */
break;
default:
ofs = 0xffffffffU;
break;
}
return ofs;
}
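/*
 * Worked offsets for the switch above, assuming libata's usual enum
 * values (SCR_STATUS = 0, SCR_ERROR = 1, SCR_CONTROL = 2):
 *   SCR_STATUS  -> SATA_STATUS + 0x0
 *   SCR_ERROR   -> SATA_STATUS + 0x4
 *   SCR_CONTROL -> SATA_STATUS + 0x8
 * while SCR_ACTIVE maps to its own SATA_ACTIVE offset.
 */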
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
unsigned int ofs = mv_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU) {
*val = readl(mv_ap_base(link->ap) + ofs);
return 0;
} else
return -EINVAL;
}
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
unsigned int ofs = mv_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU) {
void __iomem *addr = mv_ap_base(link->ap) + ofs;
struct mv_host_priv *hpriv = link->ap->host->private_data;
if (sc_reg_in == SCR_CONTROL) {
/*
* Workaround for 88SX60x1 FEr SATA#26:
*
* COMRESETs have to take care not to accidentally
* put the drive to sleep when writing SCR_CONTROL.
* Setting bits 12..15 prevents this problem.
*
* So if we see an outbound COMRESET, set those bits.
* Ditto for the follow-up write that clears the reset.
*
* The proprietary driver does this for
* all chip versions, and so do we.
*/
if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
val |= 0xf000;
if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
void __iomem *lp_phy_addr =
mv_ap_base(link->ap) + LP_PHY_CTL;
/*
* Set PHY speed according to SControl speed.
*/
if ((val & 0xf0) == 0x10)
writelfl(0x7, lp_phy_addr);
else
writelfl(0x227, lp_phy_addr);
}
}
writelfl(val, addr);
return 0;
} else
return -EINVAL;
}
static void mv6_dev_config(struct ata_device *adev)
{
/*
* Deal with Gen-II ("mv6") hardware quirks/restrictions:
*
* Gen-II does not support NCQ over a port multiplier
* (no FIS-based switching).
*/
if (adev->flags & ATA_DFLAG_NCQ) {
if (sata_pmp_attached(adev->link->ap)) {
adev->flags &= ~ATA_DFLAG_NCQ;
ata_dev_info(adev,
"NCQ disabled for command-based switching\n");
}
}
}
static int mv_qc_defer(struct ata_queued_cmd *qc)
{
struct ata_link *link = qc->dev->link;
struct ata_port *ap = link->ap;
struct mv_port_priv *pp = ap->private_data;
/*
* Don't allow new commands if we're in a delayed EH state
* for NCQ and/or FIS-based switching.
*/
if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
return ATA_DEFER_PORT;
/* PIO commands need an exclusive link: no other commands [DMA or PIO]
* can run concurrently.
* Set excl_link when we want to send a PIO command in DMA mode,
* or a non-NCQ command in NCQ mode.
* When we receive a command from that link, and there are no
* outstanding commands, mark a flag to clear excl_link and let
* the command go through.
*/
if (unlikely(ap->excl_link)) {
if (link == ap->excl_link) {
if (ap->nr_active_links)
return ATA_DEFER_PORT;
qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
return 0;
} else
return ATA_DEFER_PORT;
}
/*
* If the port is completely idle, then allow the new qc.
*/
if (ap->nr_active_links == 0)
return 0;
/*
* The port is operating in host queuing mode (EDMA) with NCQ
* enabled, allow multiple NCQ commands. EDMA also allows
* queueing multiple DMA commands but libata core currently
* doesn't allow it.
*/
if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
if (ata_is_ncq(qc->tf.protocol))
return 0;
else {
ap->excl_link = link;
return ATA_DEFER_PORT;
}
}
return ATA_DEFER_PORT;
}
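/*
 * Walk-through of the excl_link protocol above (illustrative): a PIO
 * command arriving while EDMA/NCQ is active sets ap->excl_link and is
 * deferred; once all outstanding commands drain, the retried command
 * from that link is allowed through with ATA_QCFLAG_CLEAR_EXCL set,
 * so the exclusive claim is released when that command completes.
 */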
static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
{
struct mv_port_priv *pp = ap->private_data;
void __iomem *port_mmio;
u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg;
u32 ltmode, *old_ltmode = &pp->cached.ltmode;
u32 haltcond, *old_haltcond = &pp->cached.haltcond;
ltmode = *old_ltmode & ~LTMODE_BIT8;
haltcond = *old_haltcond | EDMA_ERR_DEV;
if (want_fbs) {
fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
ltmode = *old_ltmode | LTMODE_BIT8;
if (want_ncq)
haltcond &= ~EDMA_ERR_DEV;
else
fiscfg |= FISCFG_WAIT_DEV_ERR;
} else {
fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
}
port_mmio = mv_ap_base(ap);
mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
}
static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
struct mv_host_priv *hpriv = ap->host->private_data;
u32 old, new;
/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
old = readl(hpriv->base + GPIO_PORT_CTL);
if (want_ncq)
new = old | (1 << 22);
else
new = old & ~(1 << 22);
if (new != old)
writel(new, hpriv->base + GPIO_PORT_CTL);
}
/**
* mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma
* @ap: Port being initialized
*
* There are two DMA modes on these chips: basic DMA, and EDMA.
*
* Bit-0 of the "EDMA RESERVED" register enables/disables use
* of basic DMA on the GEN_IIE versions of the chips.
*
* This bit survives EDMA resets, and must be set for basic DMA
* to function, and should be cleared when EDMA is active.
*/
static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
{
struct mv_port_priv *pp = ap->private_data;
u32 new, *old = &pp->cached.unknown_rsvd;
if (enable_bmdma)
new = *old | 1;
else
new = *old & ~1;
mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
}
/*
* SOC chips have an issue whereby the HDD LEDs don't always blink
* during I/O when NCQ is enabled. Enabling a special "LED blink" mode
* of the SOC takes care of it, generating a steady blink rate when
* any drive on the chip is active.
*
* Unfortunately, the blink mode is a global hardware setting for the SOC,
* so we must use it whenever at least one port on the SOC has NCQ enabled.
*
* We turn "LED blink" off when NCQ is not in use anywhere, because the normal
* LED operation works then, and provides better (more accurate) feedback.
*
* Note that this code assumes that an SOC never has more than one HC onboard.
*/
static void mv_soc_led_blink_enable(struct ata_port *ap)
{
struct ata_host *host = ap->host;
struct mv_host_priv *hpriv = host->private_data;
void __iomem *hc_mmio;
u32 led_ctrl;
if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
return;
hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}
static void mv_soc_led_blink_disable(struct ata_port *ap)
{
struct ata_host *host = ap->host;
struct mv_host_priv *hpriv = host->private_data;
void __iomem *hc_mmio;
u32 led_ctrl;
unsigned int port;
if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
return;
/* disable led-blink only if no ports are using NCQ */
for (port = 0; port < hpriv->n_ports; port++) {
struct ata_port *this_ap = host->ports[port];
struct mv_port_priv *pp = this_ap->private_data;
if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
return;
}
hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
{
u32 cfg;
struct mv_port_priv *pp = ap->private_data;
struct mv_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = mv_ap_base(ap);
/* set up non-NCQ EDMA configuration */
cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
pp->pp_flags &=
~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
if (IS_GEN_I(hpriv))
cfg |= (1 << 8); /* enab config burst size mask */
else if (IS_GEN_II(hpriv)) {
cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
mv_60x1_errata_sata25(ap, want_ncq);
} else if (IS_GEN_IIE(hpriv)) {
int want_fbs = sata_pmp_attached(ap);
/*
* Possible future enhancement:
*
* The chip can use FBS with non-NCQ, if we allow it,
* but first we need to have the error handling in place
* for this mode (datasheet section 7.3.15.4.2.3).
* So disallow non-NCQ FBS for now.
*/
want_fbs &= want_ncq;
mv_config_fbs(ap, want_ncq, want_fbs);
if (want_fbs) {
pp->pp_flags |= MV_PP_FLAG_FBS_EN;
cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
}
cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
if (want_edma) {
cfg |= (1 << 22); /* enab 4-entry host queue cache */
if (!IS_SOC(hpriv))
cfg |= (1 << 18); /* enab early completion */
}
if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
mv_bmdma_enable_iie(ap, !want_edma);
if (IS_SOC(hpriv)) {
if (want_ncq)
mv_soc_led_blink_enable(ap);
else
mv_soc_led_blink_disable(ap);
}
}
if (want_ncq) {
cfg |= EDMA_CFG_NCQ;
pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
}
writelfl(cfg, port_mmio + EDMA_CFG);
}
static void mv_port_free_dma_mem(struct ata_port *ap)
{
struct mv_host_priv *hpriv = ap->host->private_data;
struct mv_port_priv *pp = ap->private_data;
int tag;
if (pp->crqb) {
dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
pp->crqb = NULL;
}
if (pp->crpb) {
dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
pp->crpb = NULL;
}
/*
* For GEN_I, there's no NCQ, so we have only a single sg_tbl.
* For later hardware, we have one unique sg_tbl per NCQ tag.
*/
for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
if (pp->sg_tbl[tag]) {
if (tag == 0 || !IS_GEN_I(hpriv))
dma_pool_free(hpriv->sg_tbl_pool,
pp->sg_tbl[tag],
pp->sg_tbl_dma[tag]);
pp->sg_tbl[tag] = NULL;
}
}
}
/**
* mv_port_start - Port specific init/start routine.
* @ap: ATA channel to manipulate
*
* Allocate and point to DMA memory, init port private memory,
* zero indices.
*
* LOCKING:
* Inherited from caller.
*/
static int mv_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct mv_host_priv *hpriv = ap->host->private_data;
struct mv_port_priv *pp;
unsigned long flags;
int tag;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
ap->private_data = pp;
pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
if (!pp->crqb)
return -ENOMEM;
memset(pp->crqb, 0, MV_CRQB_Q_SZ);
pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
if (!pp->crpb)
goto out_port_free_dma_mem;
memset(pp->crpb, 0, MV_CRPB_Q_SZ);
/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
ap->flags |= ATA_FLAG_AN;
/*
* For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
* For later hardware, we need one unique sg_tbl per NCQ tag.
*/
for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
if (tag == 0 || !IS_GEN_I(hpriv)) {
pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
GFP_KERNEL, &pp->sg_tbl_dma[tag]);
if (!pp->sg_tbl[tag])
goto out_port_free_dma_mem;
} else {
pp->sg_tbl[tag] = pp->sg_tbl[0];
pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
}
}
spin_lock_irqsave(ap->lock, flags);
mv_save_cached_regs(ap);
mv_edma_cfg(ap, 0, 0);
spin_unlock_irqrestore(ap->lock, flags);
return 0;
out_port_free_dma_mem:
mv_port_free_dma_mem(ap);
return -ENOMEM;
}
/**
* mv_port_stop - Port specific cleanup/stop routine.
* @ap: ATA channel to manipulate
*
* Stop DMA, cleanup port memory.
*
* LOCKING:
* This routine uses the host lock to protect the DMA stop.
*/
static void mv_port_stop(struct ata_port *ap)
{
unsigned long flags;
spin_lock_irqsave(ap->lock, flags);
mv_stop_edma(ap);
mv_enable_port_irqs(ap, 0);
spin_unlock_irqrestore(ap->lock, flags);
mv_port_free_dma_mem(ap);
}
/**
* mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
* @qc: queued command whose SG list to source from
*
* Populate the SG list and mark the last entry.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
struct mv_port_priv *pp = qc->ap->private_data;
struct scatterlist *sg;
struct mv_sg *mv_sg, *last_sg = NULL;
unsigned int si;
mv_sg = pp->sg_tbl[qc->tag];
for_each_sg(qc->sg, sg, qc->n_elem, si) {
dma_addr_t addr = sg_dma_address(sg);
u32 sg_len = sg_dma_len(sg);
while (sg_len) {
u32 offset = addr & 0xffff;
u32 len = sg_len;
if (offset + len > 0x10000)
len = 0x10000 - offset;
mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
mv_sg->flags_size = cpu_to_le32(len & 0xffff);
mv_sg->reserved = 0;
sg_len -= len;
addr += len;
last_sg = mv_sg;
mv_sg++;
}
}
if (likely(last_sg))
last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
mb(); /* ensure data structure is visible to the chipset */
}
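/*
 * Walk-through (illustrative values): a 0x18000-byte segment at dma
 * address 0x1234f000 is emitted above as three ePRDs, so no entry
 * extends past a 64 KiB boundary of its starting offset:
 *   0x1234f000 len 0x01000
 *   0x12350000 len 0x10000 (stored as 0 in the 16-bit size field)
 *   0x12360000 len 0x07000
 */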
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
(last ? CRQB_CMD_LAST : 0);
*cmdw = cpu_to_le16(tmp);
}
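/*
 * Example packing: for the final command word, the call
 * mv_crqb_pack_cmd(cw, tf->command, ATA_REG_CMD, 1) builds
 * tf->command | (ATA_REG_CMD << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
 * CRQB_CMD_LAST, then stores it little-endian in the CRQB.
 */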
/**
* mv_sff_irq_clear - Clear hardware interrupt after DMA.
* @ap: Port associated with this ATA transaction.
*
* We need this only for ATAPI bmdma transactions,
* as otherwise we experience spurious interrupts
* after libata-sff handles the bmdma interrupts.
*/
static void mv_sff_irq_clear(struct ata_port *ap)
{
mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
}
/**
* mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
* @qc: queued command to check for chipset/DMA compatibility.
*
* The bmdma engines cannot handle speculative data sizes
* (bytecount under/over flow). So only allow DMA for
* data transfer commands with known data sizes.
*
* LOCKING:
* Inherited from caller.
*/
static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
if (scmd) {
switch (scmd->cmnd[0]) {
case READ_6:
case READ_10:
case READ_12:
case WRITE_6:
case WRITE_10:
case WRITE_12:
case GPCMD_READ_CD:
case GPCMD_SEND_DVD_STRUCTURE:
case GPCMD_SEND_CUE_SHEET:
return 0; /* DMA is safe */
}
}
return -EOPNOTSUPP; /* use PIO instead */
}
/**
* mv_bmdma_setup - Set up BMDMA transaction
* @qc: queued command to prepare DMA for.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_bmdma_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_port_priv *pp = ap->private_data;
mv_fill_sg(qc);
/* clear all DMA cmd bits */
writel(0, port_mmio + BMDMA_CMD);
/* load PRD table addr. */
writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
port_mmio + BMDMA_PRD_HIGH);
writelfl(pp->sg_tbl_dma[qc->tag],
port_mmio + BMDMA_PRD_LOW);
/* issue r/w command */
ap->ops->sff_exec_command(ap, &qc->tf);
}
/**
* mv_bmdma_start - Start a BMDMA transaction
* @qc: queued command to start DMA on.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *port_mmio = mv_ap_base(ap);
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
/* start host DMA transaction */
writelfl(cmd, port_mmio + BMDMA_CMD);
}
/**
* mv_bmdma_stop_ap - Stop BMDMA transfer on a port
* @ap: port on which to stop DMA.
*
* Clears the ATA_DMA_START flag in the bmdma control register.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_bmdma_stop_ap(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
u32 cmd;
/* clear start/stop bit */
cmd = readl(port_mmio + BMDMA_CMD);
if (cmd & ATA_DMA_START) {
cmd &= ~ATA_DMA_START;
writelfl(cmd, port_mmio + BMDMA_CMD);
/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
ata_sff_dma_pause(ap);
}
}
static void mv_bmdma_stop(struct ata_queued_cmd *qc)
{
mv_bmdma_stop_ap(qc->ap);
}
/**
* mv_bmdma_status - Read BMDMA status
* @ap: port for which to retrieve DMA status.
*
* Read and return equivalent of the sff BMDMA status register.
*
* LOCKING:
* Inherited from caller.
*/
static u8 mv_bmdma_status(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
u32 reg, status;
/*
* Other bits are valid only if ATA_DMA_ACTIVE==0,
* and the ATA_DMA_INTR bit doesn't exist.
*/
reg = readl(port_mmio + BMDMA_STATUS);
if (reg & ATA_DMA_ACTIVE)
status = ATA_DMA_ACTIVE;
else if (reg & ATA_DMA_ERR)
status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
else {
/*
* Just because DMA_ACTIVE is 0 (DMA completed),
* this does _not_ mean the device is "done".
* So we should not yet be signalling ATA_DMA_INTR
* in some cases, e.g. DSM/TRIM, and perhaps others.
*/
mv_bmdma_stop_ap(ap);
if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
status = 0;
else
status = ATA_DMA_INTR;
}
return status;
}
static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
{
struct ata_taskfile *tf = &qc->tf;
/*
* Workaround for 88SX60x1 FEr SATA#24.
*
* Chip may corrupt WRITEs if multi_count >= 4kB.
* Note that READs are unaffected.
*
* It's not clear if this errata really means "4K bytes",
* or if it always happens for multi_count > 7
* regardless of device sector_size.
*
* So, for safety, any write with multi_count > 7
* gets converted here into a regular PIO write instead:
*/
if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
if (qc->dev->multi_count > 7) {
switch (tf->command) {
case ATA_CMD_WRITE_MULTI:
tf->command = ATA_CMD_PIO_WRITE;
break;
case ATA_CMD_WRITE_MULTI_FUA_EXT:
tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
/* fall through */
case ATA_CMD_WRITE_MULTI_EXT:
tf->command = ATA_CMD_PIO_WRITE_EXT;
break;
}
}
}
}
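/*
 * Example of the demotion above: ATA_CMD_WRITE_MULTI issued to a
 * device with multi_count = 8 is rewritten to ATA_CMD_PIO_WRITE
 * before issue; with multi_count <= 7 the taskfile is left untouched.
 */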
/**
* mv_qc_prep - Host specific command preparation.
* @qc: queued command to prepare
*
* This routine simply redirects to the general purpose routine
* if command is not DMA. Else, it handles prep of the CRQB
* (command request block), does some sanity checking, and calls
* the SG load routine.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mv_port_priv *pp = ap->private_data;
__le16 *cw;
struct ata_taskfile *tf = &qc->tf;
u16 flags = 0;
unsigned in_index;
switch (tf->protocol) {
case ATA_PROT_DMA:
if (tf->command == ATA_CMD_DSM)
return;
/* fall through */
case ATA_PROT_NCQ:
break; /* continue below */
case ATA_PROT_PIO:
mv_rw_multi_errata_sata24(qc);
return;
default:
return;
}
/* Fill in command request block
*/
if (!(tf->flags & ATA_TFLAG_WRITE))
flags |= CRQB_FLAG_READ;
WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
flags |= qc->tag << CRQB_TAG_SHIFT;
flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
/* get current queue index from software */
in_index = pp->req_idx;
pp->crqb[in_index].sg_addr =
cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
pp->crqb[in_index].sg_addr_hi =
cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
cw = &pp->crqb[in_index].ata_cmd[0];
/* Sadly, the CRQB cannot accommodate all registers--there are
* only 11 bytes...so we must pick and choose required
* registers based on the command. So, we drop feature and
* hob_feature for [RW] DMA commands, but they are needed for
* NCQ. NCQ will drop hob_nsect, which is not needed there
* (nsect is used only for the tag; feat/hob_feat hold true nsect).
*/
switch (tf->command) {
case ATA_CMD_READ:
case ATA_CMD_READ_EXT:
case ATA_CMD_WRITE:
case ATA_CMD_WRITE_EXT:
case ATA_CMD_WRITE_FUA_EXT:
mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
break;
case ATA_CMD_FPDMA_READ:
case ATA_CMD_FPDMA_WRITE:
mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
break;
default:
/* The only other commands EDMA supports in non-queued and
* non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
* of which are defined/used by Linux. If we get here, this
* driver needs work.
*
* FIXME: modify libata to give qc_prep a return value and
* return error here.
*/
BUG_ON(tf->command);
break;
}
mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
return;
mv_fill_sg(qc);
}
/**
* mv_qc_prep_iie - Host specific command preparation.
* @qc: queued command to prepare
*
* This routine simply redirects to the general purpose routine
* if command is not DMA. Else, it handles prep of the CRQB
* (command request block), does some sanity checking, and calls
* the SG load routine.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mv_port_priv *pp = ap->private_data;
struct mv_crqb_iie *crqb;
struct ata_taskfile *tf = &qc->tf;
unsigned in_index;
u32 flags = 0;
if ((tf->protocol != ATA_PROT_DMA) &&
(tf->protocol != ATA_PROT_NCQ))
return;
if (tf->command == ATA_CMD_DSM)
return; /* use bmdma for this */
/* Fill in Gen IIE command request block */
if (!(tf->flags & ATA_TFLAG_WRITE))
flags |= CRQB_FLAG_READ;
WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
flags |= qc->tag << CRQB_TAG_SHIFT;
flags |= qc->tag << CRQB_HOSTQ_SHIFT;
flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
/* get current queue index from software */
in_index = pp->req_idx;
crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
crqb->flags = cpu_to_le32(flags);
crqb->ata_cmd[0] = cpu_to_le32(
(tf->command << 16) |
(tf->feature << 24)
);
crqb->ata_cmd[1] = cpu_to_le32(
(tf->lbal << 0) |
(tf->lbam << 8) |
(tf->lbah << 16) |
(tf->device << 24)
);
crqb->ata_cmd[2] = cpu_to_le32(
(tf->hob_lbal << 0) |
(tf->hob_lbam << 8) |
(tf->hob_lbah << 16) |
(tf->hob_feature << 24)
);
crqb->ata_cmd[3] = cpu_to_le32(
(tf->nsect << 0) |
(tf->hob_nsect << 8)
);
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
return;
mv_fill_sg(qc);
}
/**
* mv_sff_check_status - fetch device status, if valid
* @ap: ATA port to fetch status from
*
* When using command issue via mv_qc_issue_fis(),
* the initial ATA_BUSY state does not show up in the
* ATA status (shadow) register. This can confuse libata!
*
* So we have a hook here to fake ATA_BUSY for that situation,
* until the first time a BUSY, DRQ, or ERR bit is seen.
*
* The rest of the time, it simply returns the ATA status register.
*/
static u8 mv_sff_check_status(struct ata_port *ap)
{
u8 stat = ioread8(ap->ioaddr.status_addr);
struct mv_port_priv *pp = ap->private_data;
if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
else
stat = ATA_BUSY;
}
return stat;
}
/**
* mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
* @ap: ATA port to send the FIS on
* @fis: fis to be sent
* @nwords: number of 32-bit words in the fis
*/
static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
{
void __iomem *port_mmio = mv_ap_base(ap);
u32 ifctl, old_ifctl, ifstat;
int i, timeout = 200, final_word = nwords - 1;
/* Initiate FIS transmission mode */
old_ifctl = readl(port_mmio + SATA_IFCTL);
ifctl = 0x100 | (old_ifctl & 0xf);
writelfl(ifctl, port_mmio + SATA_IFCTL);
/* Send all words of the FIS except for the final word */
for (i = 0; i < final_word; ++i)
writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
/* Flag end-of-transmission, and then send the final word */
writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
/*
* Wait for FIS transmission to complete.
* This typically takes just a single iteration.
*/
do {
ifstat = readl(port_mmio + SATA_IFSTAT);
} while (!(ifstat & 0x1000) && --timeout);
/* Restore original port configuration */
writelfl(old_ifctl, port_mmio + SATA_IFCTL);
/* See if it worked */
if ((ifstat & 0x3000) != 0x1000) {
ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
__func__, ifstat);
return AC_ERR_OTHER;
}
return 0;
}
/**
* mv_qc_issue_fis - Issue a command directly as a FIS
* @qc: queued command to start
*
* Note that the ATA shadow registers are not updated
* after command issue, so the device will appear "READY"
* if polled, even while it is BUSY processing the command.
*
* So we use a status hook to fake ATA_BUSY until the drive changes state.
*
* Note: we don't get updated shadow regs on *completion*
* of non-data commands. So avoid sending them via this function,
* as they will appear to have completed immediately.
*
* GEN_IIE has special registers that we could get the result tf from,
* but earlier chipsets do not. For now, we ignore those registers.
*/
static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mv_port_priv *pp = ap->private_data;
struct ata_link *link = qc->dev->link;
u32 fis[5];
int err = 0;
ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
if (err)
return err;
switch (qc->tf.protocol) {
case ATAPI_PROT_PIO:
pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
/* fall through */
case ATAPI_PROT_NODATA:
ap->hsm_task_state = HSM_ST_FIRST;
break;
case ATA_PROT_PIO:
pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
if (qc->tf.flags & ATA_TFLAG_WRITE)
ap->hsm_task_state = HSM_ST_FIRST;
else
ap->hsm_task_state = HSM_ST;
break;
default:
ap->hsm_task_state = HSM_ST_LAST;
break;
}
if (qc->tf.flags & ATA_TFLAG_POLLING)
ata_sff_queue_pio_task(link, 0);
return 0;
}
/**
* mv_qc_issue - Initiate a command to the host
* @qc: queued command to start
*
* This routine simply redirects to the general purpose routine
* if command is not DMA. Else, it sanity checks our local
* caches of the request producer/consumer indices then enables
* DMA and bumps the request producer index.
*
* LOCKING:
* Inherited from caller.
*/
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
static int limit_warnings = 10;
struct ata_port *ap = qc->ap;
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_port_priv *pp = ap->private_data;
u32 in_index;
unsigned int port_irqs;
pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
if (qc->tf.command == ATA_CMD_DSM) {
if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */
return AC_ERR_OTHER;
break; /* use bmdma for this */
}
/* fall through */
case ATA_PROT_NCQ:
mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
/* Write the request in pointer to kick the EDMA to life */
writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
port_mmio + EDMA_REQ_Q_IN_PTR);
return 0;
case ATA_PROT_PIO:
/*
* Errata SATA#16, SATA#24: warn if multiple DRQs expected.
*
* Someday, we might implement special polling workarounds
* for these, but it all seems rather unnecessary since we
* normally use only DMA for commands which transfer more
* than a single block of data.
*
* Much of the time, this could just work regardless.
* So for now, just log the incident, and allow the attempt.
*/
if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
--limit_warnings;
ata_link_warn(qc->dev->link, DRV_NAME
": attempting PIO w/multiple DRQ: "
"this may fail due to h/w errata\n");
}
/* fall through */
case ATA_PROT_NODATA:
case ATAPI_PROT_PIO:
case ATAPI_PROT_NODATA:
if (ap->flags & ATA_FLAG_PIO_POLLING)
qc->tf.flags |= ATA_TFLAG_POLLING;
break;
}
if (qc->tf.flags & ATA_TFLAG_POLLING)
port_irqs = ERR_IRQ; /* mask device interrupt when polling */
else
port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */
/*
* We're about to send a non-EDMA capable command to the
* port. Turn off EDMA so there won't be problems accessing
* shadow block, etc registers.
*/
mv_stop_edma(ap);
mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
mv_pmp_select(ap, qc->dev->link->pmp);
if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
struct mv_host_priv *hpriv = ap->host->private_data;
/*
* Workaround for 88SX60x1 FEr SATA#25 (part 2).
*
* After any NCQ error, the READ_LOG_EXT command
* from libata-eh *must* use mv_qc_issue_fis().
* Otherwise it might fail, due to chip errata.
*
* Rather than special-case it, we'll just *always*
* use this method here for READ_LOG_EXT, making for
* easier testing.
*/
if (IS_GEN_II(hpriv))
return mv_qc_issue_fis(qc);
}
return ata_bmdma_qc_issue(qc);
}
static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{
struct mv_port_priv *pp = ap->private_data;
struct ata_queued_cmd *qc;
if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
return NULL;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
return qc;
return NULL;
}
static void mv_pmp_error_handler(struct ata_port *ap)
{
unsigned int pmp, pmp_map;
struct mv_port_priv *pp = ap->private_data;
if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
/*
* Perform NCQ error analysis on failed PMPs
* before we freeze the port entirely.
*
* The failed PMPs are marked earlier by mv_pmp_eh_prep().
*/
pmp_map = pp->delayed_eh_pmp_map;
pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
for (pmp = 0; pmp_map != 0; pmp++) {
unsigned int this_pmp = (1 << pmp);
if (pmp_map & this_pmp) {
struct ata_link *link = &ap->pmp_link[pmp];
pmp_map &= ~this_pmp;
ata_eh_analyze_ncq_error(link);
}
}
ata_port_freeze(ap);
}
sata_pmp_error_handler(ap);
}
static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
return readl(port_mmio + SATA_TESTCTL) >> 16;
}
static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
{
struct ata_eh_info *ehi;
unsigned int pmp;
/*
* Initialize EH info for PMPs which saw device errors
*/
ehi = &ap->link.eh_info;
for (pmp = 0; pmp_map != 0; pmp++) {
unsigned int this_pmp = (1 << pmp);
if (pmp_map & this_pmp) {
struct ata_link *link = &ap->pmp_link[pmp];
pmp_map &= ~this_pmp;
ehi = &link->eh_info;
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi, "dev err");
ehi->err_mask |= AC_ERR_DEV;
ehi->action |= ATA_EH_RESET;
ata_link_abort(link);
}
}
}
static int mv_req_q_empty(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
u32 in_ptr, out_ptr;
in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
return (in_ptr == out_ptr); /* 1 == queue_is_empty */
}
static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
{
struct mv_port_priv *pp = ap->private_data;
int failed_links;
unsigned int old_map, new_map;
/*
* Device error during FBS+NCQ operation:
*
* Set a port flag to prevent further I/O being enqueued.
* Leave the EDMA running to drain outstanding commands from this port.
* Perform the post-mortem/EH only when all responses are complete.
* Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
*/
if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
pp->delayed_eh_pmp_map = 0;
}
old_map = pp->delayed_eh_pmp_map;
new_map = old_map | mv_get_err_pmp_map(ap);
if (old_map != new_map) {
pp->delayed_eh_pmp_map = new_map;
mv_pmp_eh_prep(ap, new_map & ~old_map);
}
failed_links = hweight16(new_map);
ata_port_info(ap,
"%s: pmp_map=%04x qc_map=%04x failed_links=%d nr_active_links=%d\n",
__func__, pp->delayed_eh_pmp_map,
ap->qc_active, failed_links,
ap->nr_active_links);
if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
mv_process_crpb_entries(ap, pp);
mv_stop_edma(ap);
mv_eh_freeze(ap);
ata_port_info(ap, "%s: done\n", __func__);
return 1; /* handled */
}
ata_port_info(ap, "%s: waiting\n", __func__);
return 1; /* handled */
}
static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
{
/*
* Possible future enhancement:
*
* FBS+non-NCQ operation is not yet implemented.
* See related notes in mv_edma_cfg().
*
* Device error during FBS+non-NCQ operation:
*
* We need to snapshot the shadow registers for each failed command.
* Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
*/
return 0; /* not handled */
}
static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
{
struct mv_port_priv *pp = ap->private_data;
if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
return 0; /* EDMA was not active: not handled */
if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
return 0; /* FBS was not active: not handled */
if (!(edma_err_cause & EDMA_ERR_DEV))
return 0; /* non DEV error: not handled */
edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
return 0; /* other problems: not handled */
if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
/*
* EDMA should NOT have self-disabled for this case.
* If it did, then something is wrong elsewhere,
* and we cannot handle it here.
*/
if (edma_err_cause & EDMA_ERR_SELF_DIS) {
ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
__func__, edma_err_cause, pp->pp_flags);
return 0; /* not handled */
}
return mv_handle_fbs_ncq_dev_err(ap);
} else {
/*
* EDMA should have self-disabled for this case.
* If it did not, then something is wrong elsewhere,
* and we cannot handle it here.
*/
if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
__func__, edma_err_cause, pp->pp_flags);
return 0; /* not handled */
}
return mv_handle_fbs_non_ncq_dev_err(ap);
}
return 0; /* not handled */
}
static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
{
struct ata_eh_info *ehi = &ap->link.eh_info;
char *when = "idle";
ata_ehi_clear_desc(ehi);
if (edma_was_enabled) {
when = "EDMA enabled";
} else {
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
when = "polling";
}
ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
ehi->err_mask |= AC_ERR_OTHER;
ehi->action |= ATA_EH_RESET;
ata_port_freeze(ap);
}
/**
* mv_err_intr - Handle error interrupts on the port
* @ap: ATA channel to manipulate
*
* Most cases require a full reset of the chip's state machine,
* which also performs a COMRESET.
* Also, if the port disabled DMA, update our cached copy to match.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_err_intr(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
u32 edma_err_cause, eh_freeze_mask, serr = 0;
u32 fis_cause = 0;
struct mv_port_priv *pp = ap->private_data;
struct mv_host_priv *hpriv = ap->host->private_data;
unsigned int action = 0, err_mask = 0;
struct ata_eh_info *ehi = &ap->link.eh_info;
struct ata_queued_cmd *qc;
int abort = 0;
/*
* Read and clear the SError and err_cause bits.
* For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
* the FIS_IRQ_CAUSE register before clearing edma_err_cause.
*/
sata_scr_read(&ap->link, SCR_ERROR, &serr);
sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
}
writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
if (edma_err_cause & EDMA_ERR_DEV) {
/*
* Device errors during FIS-based switching operation
* require special handling.
*/
if (mv_handle_dev_err(ap, edma_err_cause))
return;
}
qc = mv_get_active_qc(ap);
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
edma_err_cause, pp->pp_flags);
if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
if (fis_cause & FIS_IRQ_CAUSE_AN) {
u32 ec = edma_err_cause &
~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
sata_async_notification(ap);
if (!ec)
return; /* Just an AN; no need for the nukes */
ata_ehi_push_desc(ehi, "SDB notify");
}
}
/*
* All generations share these EDMA error cause bits:
*/
if (edma_err_cause & EDMA_ERR_DEV) {
err_mask |= AC_ERR_DEV;
action |= ATA_EH_RESET;
ata_ehi_push_desc(ehi, "dev error");
}
if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
EDMA_ERR_INTRL_PAR)) {
err_mask |= AC_ERR_ATA_BUS;
action |= ATA_EH_RESET;
ata_ehi_push_desc(ehi, "parity error");
}
if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
ata_ehi_hotplugged(ehi);
ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
"dev disconnect" : "dev connect");
action |= ATA_EH_RESET;
}
/*
* Gen-I has a different SELF_DIS bit,
* different FREEZE bits, and no SERR bit:
*/
if (IS_GEN_I(hpriv)) {
eh_freeze_mask = EDMA_EH_FREEZE_5;
if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
ata_ehi_push_desc(ehi, "EDMA self-disable");
}
} else {
eh_freeze_mask = EDMA_EH_FREEZE;
if (edma_err_cause & EDMA_ERR_SELF_DIS) {
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
ata_ehi_push_desc(ehi, "EDMA self-disable");
}
if (edma_err_cause & EDMA_ERR_SERR) {
ata_ehi_push_desc(ehi, "SError=%08x", serr);
err_mask |= AC_ERR_ATA_BUS;
action |= ATA_EH_RESET;
}
}
if (!err_mask) {
err_mask = AC_ERR_OTHER;
action |= ATA_EH_RESET;
}
ehi->serror |= serr;
ehi->action |= action;
if (qc)
qc->err_mask |= err_mask;
else
ehi->err_mask |= err_mask;
if (err_mask == AC_ERR_DEV) {
/*
* Cannot do ata_port_freeze() here,
* because it would kill PIO access,
* which is needed for further diagnosis.
*/
mv_eh_freeze(ap);
abort = 1;
} else if (edma_err_cause & eh_freeze_mask) {
/*
* Note to self: ata_port_freeze() calls ata_port_abort()
*/
ata_port_freeze(ap);
} else {
abort = 1;
}
if (abort) {
if (qc)
ata_link_abort(qc->dev->link);
else
ata_port_abort(ap);
}
}
static bool mv_process_crpb_response(struct ata_port *ap,
struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{
u8 ata_status;
u16 edma_status = le16_to_cpu(response->flags);
/*
* edma_status from a response queue entry:
* LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
* MSB is saved ATA status from command completion.
*/
if (!ncq_enabled) {
u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
if (err_cause) {
/*
* Error will be seen/handled by
* mv_err_intr(). So do nothing at all here.
*/
return false;
}
}
ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
if (!ac_err_mask(ata_status))
return true;
/* else: leave it for mv_err_intr() */
return false;
}
static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_host_priv *hpriv = ap->host->private_data;
u32 in_index;
bool work_done = false;
u32 done_mask = 0;
int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
/* Get the hardware queue position index */
in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
/* Process new responses since the last time we looked */
while (in_index != pp->resp_idx) {
unsigned int tag;
struct mv_crpb *response = &pp->crpb[pp->resp_idx];
pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
if (IS_GEN_I(hpriv)) {
/* 50xx: no NCQ, only one command active at a time */
tag = ap->link.active_tag;
} else {
/* Gen II/IIE: get command tag from CRPB entry */
tag = le16_to_cpu(response->id) & 0x1f;
}
if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
done_mask |= 1 << tag;
work_done = true;
}
if (work_done) {
ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
/* Update the software queue position index in hardware */
writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
(pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
port_mmio + EDMA_RSP_Q_OUT_PTR);
}
}
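/*
 * Worked example for the completion mask above (illustrative tags):
 * with ap->qc_active = 0b1011 and done_mask = 0b0011, the value
 * 0b1011 ^ 0b0011 = 0b1000 is passed to ata_qc_complete_multiple(),
 * completing tags 0 and 1 while leaving tag 3 outstanding.
 */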
static void mv_port_intr(struct ata_port *ap, u32 port_cause)
{
struct mv_port_priv *pp;
int edma_was_enabled;
/*
* Grab a snapshot of the EDMA_EN flag setting,
* so that we have a consistent view for this port,
* even if one of the routines we call changes it.
*/
pp = ap->private_data;
edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
/*
* Process completed CRPB response(s) before other events.
*/
if (edma_was_enabled && (port_cause & DONE_IRQ)) {
mv_process_crpb_entries(ap, pp);
if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
mv_handle_fbs_ncq_dev_err(ap);
}
/*
* Handle chip-reported errors, or continue on to handle PIO.
*/
if (unlikely(port_cause & ERR_IRQ)) {
mv_err_intr(ap);
} else if (!edma_was_enabled) {
struct ata_queued_cmd *qc = mv_get_active_qc(ap);
if (qc)
ata_bmdma_port_intr(ap, qc);
else
mv_unexpected_intr(ap, edma_was_enabled);
}
}
/**
* mv_host_intr - Handle all interrupts on the given host controller
* @host: host specific structure
* @main_irq_cause: Main interrupt cause register for the chip.
*
* LOCKING:
* Inherited from caller.
*/
static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
{
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base, *hc_mmio;
unsigned int handled = 0, port;
/* If asserted, clear the "all ports" IRQ coalescing bit */
if (main_irq_cause & ALL_PORTS_COAL_DONE)
writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
for (port = 0; port < hpriv->n_ports; port++) {
struct ata_port *ap = host->ports[port];
unsigned int p, shift, hardport, port_cause;
MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
/*
* Each hc within the host has its own hc_irq_cause register,
* where the bits for the interrupting ports get ack'd.
*/
if (hardport == 0) { /* first port on this hc ? */
u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
u32 port_mask, ack_irqs;
/*
* Skip this entire hc if nothing pending for any ports
*/
if (!hc_cause) {
port += MV_PORTS_PER_HC - 1;
continue;
}
/*
* We don't need/want to read the hc_irq_cause register,
* because doing so hurts performance, and
* main_irq_cause already gives us everything we need.
*
* But we do have to *write* to the hc_irq_cause to ack
* the ports that we are handling this time through.
*
* This requires that we create a bitmap for those
* ports which interrupted us, and use that bitmap
* to ack (only) those ports via hc_irq_cause.
*/
ack_irqs = 0;
if (hc_cause & PORTS_0_3_COAL_DONE)
ack_irqs = HC_COAL_IRQ;
for (p = 0; p < MV_PORTS_PER_HC; ++p) {
if ((port + p) >= hpriv->n_ports)
break;
port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
if (hc_cause & port_mask)
ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
}
hc_mmio = mv_hc_base_from_port(mmio, port);
writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
handled = 1;
}
/*
* Handle interrupts signalled for this port:
*/
port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
if (port_cause)
mv_port_intr(ap, port_cause);
}
return handled;
}
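/*
 * Example of the ack bitmap built above (illustrative): if only
 * hardport 1 of an HC has DONE_IRQ pending, then port_mask =
 * (DONE_IRQ | ERR_IRQ) << 2 matches hc_cause, and ack_irqs becomes
 * (DMA_IRQ | DEV_IRQ) << 1, acking just that port via hc_irq_cause.
 */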
static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
struct mv_host_priv *hpriv = host->private_data;
struct ata_port *ap;
struct ata_queued_cmd *qc;
struct ata_eh_info *ehi;
unsigned int i, err_mask, printed = 0;
u32 err_cause;
err_cause = readl(mmio + hpriv->irq_cause_offset);
dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
DPRINTK("All regs @ PCI error\n");
mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
writelfl(0, mmio + hpriv->irq_cause_offset);
for (i = 0; i < host->n_ports; i++) {
ap = host->ports[i];
if (!ata_link_offline(&ap->link)) {
ehi = &ap->link.eh_info;
ata_ehi_clear_desc(ehi);
if (!printed++)
ata_ehi_push_desc(ehi,
"PCI err cause 0x%08x", err_cause);
err_mask = AC_ERR_HOST_BUS;
ehi->action = ATA_EH_RESET;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc)
qc->err_mask |= err_mask;
else
ehi->err_mask |= err_mask;
ata_port_freeze(ap);
}
}
return 1; /* handled */
}
/**
* mv_interrupt - Main interrupt event handler
* @irq: unused
* @dev_instance: private data; in this case the host structure
*
* Read the read-only cause register to determine if any host
* controllers have pending interrupts. If so, call lower level
* routine to handle. Also check for PCI errors which are only
* reported here.
*
* LOCKING:
* This routine holds the host lock while processing pending
* interrupts.
*/
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct mv_host_priv *hpriv = host->private_data;
unsigned int handled = 0;
int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
u32 main_irq_cause, pending_irqs;
spin_lock(&host->lock);
/* for MSI: block new interrupts while in here */
if (using_msi)
mv_write_main_irq_mask(0, hpriv);
main_irq_cause = readl(hpriv->main_irq_cause_addr);
pending_irqs = main_irq_cause & hpriv->main_irq_mask;
/*
* Deal with cases where we either have nothing pending, or have read
* a bogus register value which can indicate HW removal or PCI fault.
*/
if (pending_irqs && main_irq_cause != 0xffffffffU) {
if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
handled = mv_pci_error(host, hpriv->base);
else
handled = mv_host_intr(host, pending_irqs);
}
/* for MSI: unmask; interrupt cause bits will retrigger now */
if (using_msi)
mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
}
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
unsigned int ofs;
switch (sc_reg_in) {
case SCR_STATUS:
case SCR_ERROR:
case SCR_CONTROL:
ofs = sc_reg_in * sizeof(u32);
break;
default:
ofs = 0xffffffffU;
break;
}
return ofs;
}
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
struct mv_host_priv *hpriv = link->ap->host->private_data;
void __iomem *mmio = hpriv->base;
void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU) {
*val = readl(addr + ofs);
return 0;
} else
return -EINVAL;
}
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
struct mv_host_priv *hpriv = link->ap->host->private_data;
void __iomem *mmio = hpriv->base;
void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU) {
writelfl(val, addr + ofs);
return 0;
} else
return -EINVAL;
}
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
int early_5080;
early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
if (!early_5080) {
u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
tmp |= (1 << 0);
writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
mv_reset_pci_bus(host, mmio);
}
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
writel(0x0fcfffff, mmio + FLASH_CTL);
}
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio)
{
void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
u32 tmp;
tmp = readl(phy_mmio + MV5_PHY_MODE);
hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
}
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
u32 tmp;
writel(0, mmio + GPIO_PORT_CTL);
/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
tmp |= ~(1 << 0);
writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port)
{
void __iomem *phy_mmio = mv5_phy_base(mmio, port);
const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
u32 tmp;
int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
if (fix_apm_sq) {
tmp = readl(phy_mmio + MV5_LTMODE);
tmp |= (1 << 19);
writel(tmp, phy_mmio + MV5_LTMODE);
tmp = readl(phy_mmio + MV5_PHY_CTL);
tmp &= ~0x3;
tmp |= 0x1;
writel(tmp, phy_mmio + MV5_PHY_CTL);
}
tmp = readl(phy_mmio + MV5_PHY_MODE);
tmp &= ~mask;
tmp |= hpriv->signal[port].pre;
tmp |= hpriv->signal[port].amps;
writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port)
{
void __iomem *port_mmio = mv_port_base(mmio, port);
mv_reset_channel(hpriv, mmio, port);
ZERO(0x028); /* command */
writel(0x11f, port_mmio + EDMA_CFG);
ZERO(0x004); /* timer */
ZERO(0x008); /* irq err cause */
ZERO(0x00c); /* irq err mask */
ZERO(0x010); /* rq bah */
ZERO(0x014); /* rq inp */
ZERO(0x018); /* rq outp */
ZERO(0x01c); /* respq bah */
ZERO(0x024); /* respq outp */
ZERO(0x020); /* respq inp */
ZERO(0x02c); /* test control */
writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int hc)
{
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
u32 tmp;
ZERO(0x00c);
ZERO(0x010);
ZERO(0x014);
ZERO(0x018);
tmp = readl(hc_mmio + 0x20);
tmp &= 0x1c1c1c1c;
tmp |= 0x03030303;
writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int n_hc)
{
unsigned int hc, port;
for (hc = 0; hc < n_hc; hc++) {
for (port = 0; port < MV_PORTS_PER_HC; port++)
mv5_reset_hc_port(hpriv, mmio,
(hc * MV_PORTS_PER_HC) + port);
mv5_reset_one_hc(hpriv, mmio, hc);
}
return 0;
}
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
struct mv_host_priv *hpriv = host->private_data;
u32 tmp;
tmp = readl(mmio + MV_PCI_MODE);
tmp &= 0xff00ffff;
writel(tmp, mmio + MV_PCI_MODE);
ZERO(MV_PCI_DISC_TIMER);
ZERO(MV_PCI_MSI_TRIGGER);
writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
ZERO(MV_PCI_SERR_MASK);
ZERO(hpriv->irq_cause_offset);
ZERO(hpriv->irq_mask_offset);
ZERO(MV_PCI_ERR_LOW_ADDRESS);
ZERO(MV_PCI_ERR_HIGH_ADDRESS);
ZERO(MV_PCI_ERR_ATTRIBUTE);
ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
u32 tmp;
mv5_reset_flash(hpriv, mmio);
tmp = readl(mmio + GPIO_PORT_CTL);
tmp &= 0x3;
tmp |= (1 << 5) | (1 << 6);
writel(tmp, mmio + GPIO_PORT_CTL);
}
/**
* mv6_reset_hc - Perform the 6xxx global soft reset
* @mmio: base address of the HBA
*
* This routine only applies to 6xxx parts.
*
* LOCKING:
* Inherited from caller.
*/
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int n_hc)
{
void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
int i, rc = 0;
u32 t;
/* Following procedure defined in PCI "main command and status
* register" table.
*/
t = readl(reg);
writel(t | STOP_PCI_MASTER, reg);
for (i = 0; i < 1000; i++) {
udelay(1);
t = readl(reg);
if (PCI_MASTER_EMPTY & t)
break;
}
if (!(PCI_MASTER_EMPTY & t)) {
printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
rc = 1;
goto done;
}
/* set reset */
i = 5;
do {
writel(t | GLOB_SFT_RST, reg);
t = readl(reg);
udelay(1);
} while (!(GLOB_SFT_RST & t) && (i-- > 0));
if (!(GLOB_SFT_RST & t)) {
printk(KERN_ERR DRV_NAME ": can't set global reset\n");
rc = 1;
goto done;
}
/* clear reset and *reenable the PCI master* (not mentioned in spec) */
i = 5;
do {
writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
t = readl(reg);
udelay(1);
} while ((GLOB_SFT_RST & t) && (i-- > 0));
if (GLOB_SFT_RST & t) {
printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
rc = 1;
}
done:
return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio)
{
void __iomem *port_mmio;
u32 tmp;
tmp = readl(mmio + RESET_CFG);
if ((tmp & (1 << 0)) == 0) {
hpriv->signal[idx].amps = 0x7 << 8;
hpriv->signal[idx].pre = 0x1 << 5;
return;
}
port_mmio = mv_port_base(mmio, idx);
tmp = readl(port_mmio + PHY_MODE2);
hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
}
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
writel(0x00000060, mmio + GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port)
{
void __iomem *port_mmio = mv_port_base(mmio, port);
u32 hp_flags = hpriv->hp_flags;
int fix_phy_mode2 =
hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
int fix_phy_mode4 =
hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
u32 m2, m3;
if (fix_phy_mode2) {
m2 = readl(port_mmio + PHY_MODE2);
m2 &= ~(1 << 16);
m2 |= (1 << 31);
writel(m2, port_mmio + PHY_MODE2);
udelay(200);
m2 = readl(port_mmio + PHY_MODE2);
m2 &= ~((1 << 16) | (1 << 31));
writel(m2, port_mmio + PHY_MODE2);
udelay(200);
}
/*
* Gen-II/IIe PHY_MODE3 errata RM#2:
* Achieves better receiver noise performance than the h/w default:
*/
m3 = readl(port_mmio + PHY_MODE3);
m3 = (m3 & 0x1f) | (0x5555601 << 5);
/* Guideline 88F5182 (GL# SATA-S11) */
if (IS_SOC(hpriv))
m3 &= ~0x1c;
if (fix_phy_mode4) {
u32 m4 = readl(port_mmio + PHY_MODE4);
/*
* Enforce reserved-bit restrictions on GenIIe devices only.
* For earlier chipsets, force only the internal config field
* (workaround for errata FEr SATA#10 part 1).
*/
if (IS_GEN_IIE(hpriv))
m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
else
m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
writel(m4, port_mmio + PHY_MODE4);
}
/*
* Workaround for 60x1-B2 errata SATA#13:
* Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
* so we must always rewrite PHY_MODE3 after PHY_MODE4.
* Or ensure we use writelfl() when writing PHY_MODE4.
*/
writel(m3, port_mmio + PHY_MODE3);
/* Revert values of pre-emphasis and signal amps to the saved ones */
m2 = readl(port_mmio + PHY_MODE2);
m2 &= ~MV_M2_PREAMP_MASK;
m2 |= hpriv->signal[port].amps;
m2 |= hpriv->signal[port].pre;
m2 &= ~(1 << 16);
/* according to mvSata 3.6.1, some IIE values are fixed */
if (IS_GEN_IIE(hpriv)) {
m2 &= ~0xC30FF01F;
m2 |= 0x0000900F;
}
writel(m2, port_mmio + PHY_MODE2);
}
/* TODO: use the generic LED interface to configure the SATA Presence
 * and Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
void __iomem *mmio)
{
return;
}
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio)
{
void __iomem *port_mmio;
u32 tmp;
port_mmio = mv_port_base(mmio, idx);
tmp = readl(port_mmio + PHY_MODE2);
hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
}
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
void __iomem *mmio, unsigned int port)
{
void __iomem *port_mmio = mv_port_base(mmio, port);
mv_reset_channel(hpriv, mmio, port);
ZERO(0x028); /* command */
writel(0x101f, port_mmio + EDMA_CFG);
ZERO(0x004); /* timer */
ZERO(0x008); /* irq err cause */
ZERO(0x00c); /* irq err mask */
ZERO(0x010); /* rq bah */
ZERO(0x014); /* rq inp */
ZERO(0x018); /* rq outp */
ZERO(0x01c); /* respq bah */
ZERO(0x024); /* respq outp */
ZERO(0x020); /* respq inp */
ZERO(0x02c); /* test control */
writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
void __iomem *mmio)
{
void __iomem *hc_mmio = mv_hc_base(mmio, 0);
ZERO(0x00c);
ZERO(0x010);
ZERO(0x014);
}
#undef ZERO
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
void __iomem *mmio, unsigned int n_hc)
{
unsigned int port;
for (port = 0; port < hpriv->n_ports; port++)
mv_soc_reset_hc_port(hpriv, mmio, port);
mv_soc_reset_one_hc(hpriv, mmio);
return 0;
}
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
void __iomem *mmio)
{
return;
}
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
return;
}
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
void __iomem *mmio, unsigned int port)
{
void __iomem *port_mmio = mv_port_base(mmio, port);
u32 reg;
reg = readl(port_mmio + PHY_MODE3);
reg &= ~(0x3 << 27); /* SELMUPF (bits 28:27) to 1 */
reg |= (0x1 << 27);
reg &= ~(0x3 << 29); /* SELMUPI (bits 30:29) to 1 */
reg |= (0x1 << 29);
writel(reg, port_mmio + PHY_MODE3);
reg = readl(port_mmio + PHY_MODE4);
reg &= ~0x1; /* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
reg |= (0x1 << 16);
writel(reg, port_mmio + PHY_MODE4);
reg = readl(port_mmio + PHY_MODE9_GEN2);
reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */
reg |= 0x8;
reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */
writel(reg, port_mmio + PHY_MODE9_GEN2);
reg = readl(port_mmio + PHY_MODE9_GEN1);
reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */
reg |= 0x8;
reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */
writel(reg, port_mmio + PHY_MODE9_GEN1);
}
/**
 *      soc_is_65n - check if the SoC is a 65 nm device
 *
 *      Detect the SoC type by reading the PHYCFG_OFS register: the
 *      register exists only on the 65 nm devices and holds a non-zero
 *      value there, while reading it on older devices returns 0.
 */
static bool soc_is_65n(struct mv_host_priv *hpriv)
{
void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
if (readl(port0_mmio + PHYCFG_OFS))
return true;
return false;
}
static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
u32 ifcfg = readl(port_mmio + SATA_IFCFG);
ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
if (want_gen2i)
ifcfg |= (1 << 7); /* enable gen2i speed */
writelfl(ifcfg, port_mmio + SATA_IFCFG);
}
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port_no)
{
void __iomem *port_mmio = mv_port_base(mmio, port_no);
/*
* The datasheet warns against setting EDMA_RESET when EDMA is active
* (but doesn't say what the problem might be). So we first try
* to disable the EDMA engine before doing the EDMA_RESET operation.
*/
mv_stop_edma_engine(port_mmio);
writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
if (!IS_GEN_I(hpriv)) {
/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
mv_setup_ifcfg(port_mmio, 1);
}
/*
* Strobing EDMA_RESET here causes a hard reset of the SATA transport,
* link, and physical layers. It resets all SATA interface registers
* (except for SATA_IFCFG), and issues a COMRESET to the dev.
*/
writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
udelay(25); /* allow reset propagation */
writelfl(0, port_mmio + EDMA_CMD);
hpriv->ops->phy_errata(hpriv, mmio, port_no);
if (IS_GEN_I(hpriv))
mdelay(1);
}
static void mv_pmp_select(struct ata_port *ap, int pmp)
{
if (sata_pmp_supported(ap)) {
void __iomem *port_mmio = mv_ap_base(ap);
u32 reg = readl(port_mmio + SATA_IFCTL);
int old = reg & 0xf;
if (old != pmp) {
reg = (reg & ~0xf) | pmp;
writelfl(reg, port_mmio + SATA_IFCTL);
}
}
}
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
mv_pmp_select(link->ap, sata_srst_pmp(link));
return sata_std_hardreset(link, class, deadline);
}
static int mv_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
mv_pmp_select(link->ap, sata_srst_pmp(link));
return ata_sff_softreset(link, class, deadline);
}
static int mv_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct mv_host_priv *hpriv = ap->host->private_data;
struct mv_port_priv *pp = ap->private_data;
void __iomem *mmio = hpriv->base;
int rc, attempts = 0, extra = 0;
u32 sstatus;
bool online;
mv_reset_channel(hpriv, mmio, ap->port_no);
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
pp->pp_flags &=
~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
/* Workaround for errata FEr SATA#10 (part 2) */
do {
const unsigned long *timing =
sata_ehc_deb_timing(&link->eh_context);
rc = sata_link_hardreset(link, timing, deadline + extra,
&online, NULL);
rc = online ? -EAGAIN : rc;
if (rc)
return rc;
sata_scr_read(link, SCR_STATUS, &sstatus);
if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
/* Force 1.5gb/s link speed and try again */
mv_setup_ifcfg(mv_ap_base(ap), 0);
if (time_after(jiffies + HZ, deadline))
extra = HZ; /* only extend it once, max */
}
} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
mv_save_cached_regs(ap);
mv_edma_cfg(ap, 0, 0);
return rc;
}
static void mv_eh_freeze(struct ata_port *ap)
{
mv_stop_edma(ap);
mv_enable_port_irqs(ap, 0);
}
static void mv_eh_thaw(struct ata_port *ap)
{
struct mv_host_priv *hpriv = ap->host->private_data;
unsigned int port = ap->port_no;
unsigned int hardport = mv_hardport_from_port(port);
void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
void __iomem *port_mmio = mv_ap_base(ap);
u32 hc_irq_cause;
/* clear EDMA errors on this port */
writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
/* clear pending irq events */
hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
mv_enable_port_irqs(ap, ERR_IRQ);
}
/**
* mv_port_init - Perform some early initialization on a single port.
* @port: libata data structure storing shadow register addresses
* @port_mmio: base address of the port
*
* Initialize shadow register mmio addresses, clear outstanding
* interrupts on the port, and unmask interrupts for the future
* start of the port.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
	/* PIO-related setup: the ATA shadow registers are memory-mapped,
	 * one u32 apart, starting at SHD_BLK.
	 */
port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
port->error_addr =
port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
port->status_addr =
port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
/* special case: control/altstatus doesn't have ATA_REG_ address */
port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
/* Clear any currently outstanding port interrupt conditions */
serr = port_mmio + mv_scr_offset(SCR_ERROR);
writelfl(readl(serr), serr);
writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
/* unmask all non-transient EDMA error interrupts */
writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
readl(port_mmio + EDMA_CFG),
readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
readl(port_mmio + EDMA_ERR_IRQ_MASK));
}
static unsigned int mv_in_pcix_mode(struct ata_host *host)
{
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base;
u32 reg;
if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
return 0; /* not PCI-X capable */
reg = readl(mmio + MV_PCI_MODE);
if ((reg & MV_PCI_MODE_MASK) == 0)
return 0; /* conventional PCI mode */
return 1; /* chip is in PCI-X mode */
}
static int mv_pci_cut_through_okay(struct ata_host *host)
{
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base;
u32 reg;
if (!mv_in_pcix_mode(host)) {
reg = readl(mmio + MV_PCI_COMMAND);
if (reg & MV_PCI_COMMAND_MRDTRIG)
return 0; /* not okay */
}
return 1; /* okay */
}
static void mv_60x1b2_errata_pci7(struct ata_host *host)
{
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base;
/* workaround for 60x1-B2 errata PCI#7 */
if (mv_in_pcix_mode(host)) {
u32 reg = readl(mmio + MV_PCI_COMMAND);
writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
}
}
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
struct mv_host_priv *hpriv = host->private_data;
u32 hp_flags = hpriv->hp_flags;
switch (board_idx) {
case chip_5080:
hpriv->ops = &mv5xxx_ops;
hp_flags |= MV_HP_GEN_I;
switch (pdev->revision) {
case 0x1:
hp_flags |= MV_HP_ERRATA_50XXB0;
break;
case 0x3:
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
default:
dev_warn(&pdev->dev,
"Applying 50XXB2 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
}
break;
case chip_504x:
case chip_508x:
hpriv->ops = &mv5xxx_ops;
hp_flags |= MV_HP_GEN_I;
switch (pdev->revision) {
case 0x0:
hp_flags |= MV_HP_ERRATA_50XXB0;
break;
case 0x3:
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
default:
dev_warn(&pdev->dev,
"Applying B2 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
}
break;
case chip_604x:
case chip_608x:
hpriv->ops = &mv6xxx_ops;
hp_flags |= MV_HP_GEN_II;
switch (pdev->revision) {
case 0x7:
mv_60x1b2_errata_pci7(host);
hp_flags |= MV_HP_ERRATA_60X1B2;
break;
case 0x9:
hp_flags |= MV_HP_ERRATA_60X1C0;
break;
default:
dev_warn(&pdev->dev,
"Applying B2 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_60X1B2;
break;
}
break;
case chip_7042:
hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
if (pdev->vendor == PCI_VENDOR_ID_TTI &&
(pdev->device == 0x2300 || pdev->device == 0x2310))
{
/*
* Highpoint RocketRAID PCIe 23xx series cards:
*
* Unconfigured drives are treated as "Legacy"
* by the BIOS, and it overwrites sector 8 with
* a "Lgcy" metadata block prior to Linux boot.
*
* Configured drives (RAID or JBOD) leave sector 8
* alone, but instead overwrite a high numbered
* sector for the RAID metadata. This sector can
* be determined exactly, by truncating the physical
* drive capacity to a nice even GB value.
*
* RAID metadata is at: (dev->n_sectors & ~0xfffff)
*
* Warn the user, lest they think we're just buggy.
*/
printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
" BIOS CORRUPTS DATA on all attached drives,"
" regardless of if/how they are configured."
" BEWARE!\n");
printk(KERN_WARNING DRV_NAME ": For data safety, do not"
" use sectors 8-9 on \"Legacy\" drives,"
" and avoid the final two gigabytes on"
" all RocketRAID BIOS initialized drives.\n");
}
		/* fall through */
case chip_6042:
hpriv->ops = &mv6xxx_ops;
hp_flags |= MV_HP_GEN_IIE;
if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
hp_flags |= MV_HP_CUT_THROUGH;
switch (pdev->revision) {
case 0x2: /* Rev.B0: the first/only public release */
hp_flags |= MV_HP_ERRATA_60X1C0;
break;
default:
dev_warn(&pdev->dev,
"Applying 60X1C0 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_60X1C0;
break;
}
break;
case chip_soc:
if (soc_is_65n(hpriv))
hpriv->ops = &mv_soc_65n_ops;
else
hpriv->ops = &mv_soc_ops;
hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
MV_HP_ERRATA_60X1C0;
break;
default:
dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
return 1;
}
hpriv->hp_flags = hp_flags;
if (hp_flags & MV_HP_PCIE) {
hpriv->irq_cause_offset = PCIE_IRQ_CAUSE;
hpriv->irq_mask_offset = PCIE_IRQ_MASK;
hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
} else {
hpriv->irq_cause_offset = PCI_IRQ_CAUSE;
hpriv->irq_mask_offset = PCI_IRQ_MASK;
hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
}
return 0;
}
/**
* mv_init_host - Perform some early initialization of the host.
* @host: ATA host to initialize
*
* If possible, do an early global reset of the host. Then do
* our port init and clear/unmask all/relevant host interrupts.
*
* LOCKING:
* Inherited from caller.
*/
static int mv_init_host(struct ata_host *host)
{
int rc = 0, n_hc, port, hc;
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base;
rc = mv_chip_id(host, hpriv->board_idx);
if (rc)
goto done;
if (IS_SOC(hpriv)) {
hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK;
} else {
hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK;
}
/* initialize shadow irq mask with register's value */
hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
/* global interrupt mask: 0 == mask everything */
mv_set_main_irq_mask(host, ~0, 0);
n_hc = mv_get_hc_count(host->ports[0]->flags);
for (port = 0; port < host->n_ports; port++)
if (hpriv->ops->read_preamp)
hpriv->ops->read_preamp(hpriv, port, mmio);
rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
if (rc)
goto done;
hpriv->ops->reset_flash(hpriv, mmio);
hpriv->ops->reset_bus(host, mmio);
hpriv->ops->enable_leds(hpriv, mmio);
for (port = 0; port < host->n_ports; port++) {
struct ata_port *ap = host->ports[port];
void __iomem *port_mmio = mv_port_base(mmio, port);
mv_port_init(&ap->ioaddr, port_mmio);
}
for (hc = 0; hc < n_hc; hc++) {
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
"(before clear)=0x%08x\n", hc,
readl(hc_mmio + HC_CFG),
readl(hc_mmio + HC_IRQ_CAUSE));
/* Clear any currently outstanding hc interrupt conditions */
writelfl(0, hc_mmio + HC_IRQ_CAUSE);
}
if (!IS_SOC(hpriv)) {
/* Clear any currently outstanding host interrupt conditions */
writelfl(0, mmio + hpriv->irq_cause_offset);
/* and unmask interrupt generation for host regs */
writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
}
/*
* enable only global host interrupts for now.
* The per-port interrupts get done later as ports are set up.
*/
mv_set_main_irq_mask(host, 0, PCI_ERR);
mv_set_irq_coalescing(host, irq_coalescing_io_count,
irq_coalescing_usecs);
done:
return rc;
}
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
MV_CRQB_Q_SZ, 0);
if (!hpriv->crqb_pool)
return -ENOMEM;
hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
MV_CRPB_Q_SZ, 0);
if (!hpriv->crpb_pool)
return -ENOMEM;
hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
MV_SG_TBL_SZ, 0);
if (!hpriv->sg_tbl_pool)
return -ENOMEM;
return 0;
}
static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
const struct mbus_dram_target_info *dram)
{
int i;
for (i = 0; i < 4; i++) {
writel(0, hpriv->base + WINDOW_CTRL(i));
writel(0, hpriv->base + WINDOW_BASE(i));
}
for (i = 0; i < dram->num_cs; i++) {
const struct mbus_dram_window *cs = dram->cs + i;
writel(((cs->size - 1) & 0xffff0000) |
(cs->mbus_attr << 8) |
(dram->mbus_dram_target_id << 4) | 1,
hpriv->base + WINDOW_CTRL(i));
writel(cs->base, hpriv->base + WINDOW_BASE(i));
}
}
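/* Worked example (hypothetical values, not taken from this driver): a
 * 256 MiB chip-select with mbus_attr 0xe and target id 0 programs
 *   ((0x10000000 - 1) & 0xffff0000) | (0xe << 8) | (0 << 4) | 1
 *   = 0x0fff0000 | 0x0e00 | 0x0 | 0x1 = 0x0fff0e01
 * i.e. the window size in the top 16 bits, then the attribute, the
 * target id, and the enable bit. */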
/**
 * mv_platform_probe - handle a positive probe of an SoC Marvell host
* @pdev: platform device found
*
* LOCKING:
* Inherited from caller.
*/
static int mv_platform_probe(struct platform_device *pdev)
{
const struct mv_sata_platform_data *mv_platform_data;
const struct mbus_dram_target_info *dram;
const struct ata_port_info *ppi[] =
{ &mv_port_info[chip_soc], NULL };
struct ata_host *host;
struct mv_host_priv *hpriv;
struct resource *res;
int n_ports = 0, irq = 0;
int rc;
int port;
ata_print_version_once(&pdev->dev, DRV_VERSION);
/*
 * Simple resource validation.
*/
if (unlikely(pdev->num_resources != 2)) {
dev_err(&pdev->dev, "invalid number of resources\n");
return -EINVAL;
}
/*
* Get the register base first
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL)
return -EINVAL;
/* allocate host */
if (pdev->dev.of_node) {
of_property_read_u32(pdev->dev.of_node, "nr-ports", &n_ports);
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
} else {
mv_platform_data = dev_get_platdata(&pdev->dev);
n_ports = mv_platform_data->n_ports;
irq = platform_get_irq(pdev, 0);
}
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
if (!host || !hpriv)
return -ENOMEM;
hpriv->port_clks = devm_kzalloc(&pdev->dev,
sizeof(struct clk *) * n_ports,
GFP_KERNEL);
if (!hpriv->port_clks)
return -ENOMEM;
hpriv->port_phys = devm_kzalloc(&pdev->dev,
sizeof(struct phy *) * n_ports,
GFP_KERNEL);
if (!hpriv->port_phys)
return -ENOMEM;
host->private_data = hpriv;
hpriv->board_idx = chip_soc;
host->iomap = NULL;
hpriv->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
hpriv->base -= SATAHC0_REG_BASE;
hpriv->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(hpriv->clk))
dev_notice(&pdev->dev, "cannot get optional clkdev\n");
else
clk_prepare_enable(hpriv->clk);
for (port = 0; port < n_ports; port++) {
char port_number[16];
sprintf(port_number, "%d", port);
hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
if (!IS_ERR(hpriv->port_clks[port]))
clk_prepare_enable(hpriv->port_clks[port]);
sprintf(port_number, "port%d", port);
hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
port_number);
if (IS_ERR(hpriv->port_phys[port])) {
rc = PTR_ERR(hpriv->port_phys[port]);
hpriv->port_phys[port] = NULL;
if (rc != -EPROBE_DEFER)
dev_warn(&pdev->dev, "error getting phy %d", rc);
/* Cleanup only the initialized ports */
hpriv->n_ports = port;
goto err;
} else
phy_power_on(hpriv->port_phys[port]);
}
/* All the ports have been initialized */
hpriv->n_ports = n_ports;
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
dram = mv_mbus_dram_info();
if (dram)
mv_conf_mbus_windows(hpriv, dram);
rc = mv_create_dma_pools(hpriv, &pdev->dev);
if (rc)
goto err;
/*
* To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be
* updated in the LP_PHY_CTL register.
*/
if (pdev->dev.of_node &&
of_device_is_compatible(pdev->dev.of_node,
"marvell,armada-370-sata"))
hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;
/* initialize adapter */
rc = mv_init_host(host);
if (rc)
goto err;
dev_info(&pdev->dev, "slots %u ports %d\n",
(unsigned)MV_MAX_Q_DEPTH, host->n_ports);
rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
if (!rc)
return 0;
err:
if (!IS_ERR(hpriv->clk)) {
clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
}
for (port = 0; port < hpriv->n_ports; port++) {
if (!IS_ERR(hpriv->port_clks[port])) {
clk_disable_unprepare(hpriv->port_clks[port]);
clk_put(hpriv->port_clks[port]);
}
phy_power_off(hpriv->port_phys[port]);
}
return rc;
}
/*
* mv_platform_remove - unplug a platform interface
* @pdev: platform device
*
* A platform bus SATA device has been unplugged. Perform the needed
* cleanup. Also called on module unload for any active devices.
*/
static int mv_platform_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct mv_host_priv *hpriv = host->private_data;
int port;
ata_host_detach(host);
if (!IS_ERR(hpriv->clk)) {
clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
}
for (port = 0; port < host->n_ports; port++) {
if (!IS_ERR(hpriv->port_clks[port])) {
clk_disable_unprepare(hpriv->port_clks[port]);
clk_put(hpriv->port_clks[port]);
}
phy_power_off(hpriv->port_phys[port]);
}
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
{
struct ata_host *host = platform_get_drvdata(pdev);
if (host)
return ata_host_suspend(host, state);
else
return 0;
}
static int mv_platform_resume(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
const struct mbus_dram_target_info *dram;
int ret;
if (host) {
struct mv_host_priv *hpriv = host->private_data;
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
dram = mv_mbus_dram_info();
if (dram)
mv_conf_mbus_windows(hpriv, dram);
/* initialize adapter */
ret = mv_init_host(host);
if (ret) {
printk(KERN_ERR DRV_NAME ": Error during HW init\n");
return ret;
}
ata_host_resume(host);
}
return 0;
}
#else
#define mv_platform_suspend NULL
#define mv_platform_resume NULL
#endif
#ifdef CONFIG_OF
static struct of_device_id mv_sata_dt_ids[] = {
{ .compatible = "marvell,armada-370-sata", },
{ .compatible = "marvell,orion-sata", },
{},
};
MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
#endif
static struct platform_driver mv_platform_driver = {
.probe = mv_platform_probe,
.remove = mv_platform_remove,
.suspend = mv_platform_suspend,
.resume = mv_platform_resume,
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(mv_sata_dt_ids),
},
};
#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int mv_pci_device_resume(struct pci_dev *pdev);
#endif
static struct pci_driver mv_pci_driver = {
.name = DRV_NAME,
.id_table = mv_pci_tbl,
.probe = mv_pci_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = mv_pci_device_resume,
#endif
};
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
int rc;
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev,
"64-bit DMA enable failed\n");
return rc;
}
}
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev,
"32-bit consistent DMA enable failed\n");
return rc;
}
}
return rc;
}
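/*
 * A minimal sketch (assumption: a kernel that provides
 * dma_set_mask_and_coherent(); this helper is not part of the driver):
 * the fallback ladder above collapses to two calls.
 */
#if 0	/* illustrative only */
static int pci_go_64_sketch(struct pci_dev *pdev)
{
	/* Try 64-bit streaming + coherent masks, else fall back to 32-bit. */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return 0;
}
#endif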
/**
* mv_print_info - Dump key info to kernel log for perusal.
* @host: ATA host to print info about
*
* FIXME: complete this.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_print_info(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
struct mv_host_priv *hpriv = host->private_data;
u8 scc;
const char *scc_s, *gen;
/* Use this to determine the HW stepping of the chip so we know
* what errata to workaround
*/
pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
if (scc == 0)
scc_s = "SCSI";
else if (scc == 0x01)
scc_s = "RAID";
else
scc_s = "?";
if (IS_GEN_I(hpriv))
gen = "I";
else if (IS_GEN_II(hpriv))
gen = "II";
else if (IS_GEN_IIE(hpriv))
gen = "IIE";
else
gen = "?";
dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
* mv_pci_init_one - handle a positive probe of a PCI Marvell host
* @pdev: PCI device found
* @ent: PCI device ID entry for the matched host
*
* LOCKING:
* Inherited from caller.
*/
static int mv_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned int board_idx = (unsigned int)ent->driver_data;
const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
struct ata_host *host;
struct mv_host_priv *hpriv;
int n_ports, port, rc;
ata_print_version_once(&pdev->dev, DRV_VERSION);
/* allocate host */
n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
if (!host || !hpriv)
return -ENOMEM;
host->private_data = hpriv;
hpriv->n_ports = n_ports;
hpriv->board_idx = board_idx;
/* acquire resources */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
host->iomap = pcim_iomap_table(pdev);
hpriv->base = host->iomap[MV_PRIMARY_BAR];
rc = pci_go_64(pdev);
if (rc)
return rc;
rc = mv_create_dma_pools(hpriv, &pdev->dev);
if (rc)
return rc;
for (port = 0; port < host->n_ports; port++) {
struct ata_port *ap = host->ports[port];
void __iomem *port_mmio = mv_port_base(hpriv->base, port);
unsigned int offset = port_mmio - hpriv->base;
ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
}
/* initialize adapter */
rc = mv_init_host(host);
if (rc)
return rc;
/* Enable message-switched interrupts, if requested */
if (msi && pci_enable_msi(pdev) == 0)
hpriv->hp_flags |= MV_HP_FLAG_MSI;
mv_dump_pci_cfg(pdev, 0x68);
mv_print_info(host);
pci_set_master(pdev);
pci_try_set_mwi(pdev);
return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#ifdef CONFIG_PM_SLEEP
static int mv_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
/* initialize adapter */
rc = mv_init_host(host);
if (rc)
return rc;
ata_host_resume(host);
return 0;
}
#endif
#endif
static int __init mv_init(void)
{
int rc = -ENODEV;
#ifdef CONFIG_PCI
rc = pci_register_driver(&mv_pci_driver);
if (rc < 0)
return rc;
#endif
rc = platform_driver_register(&mv_platform_driver);
#ifdef CONFIG_PCI
if (rc < 0)
pci_unregister_driver(&mv_pci_driver);
#endif
return rc;
}
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
pci_unregister_driver(&mv_pci_driver);
#endif
platform_driver_unregister(&mv_platform_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
module_init(mv_init);
module_exit(mv_exit);
| gpl-2.0 |
arnoldthebat/linux-stable | arch/arm64/mm/pageattr.c | 131 | 3737 | /*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
struct page_change_data {
pgprot_t set_mask;
pgprot_t clear_mask;
};
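/*
 * apply_to_page_range() callback: clears cdata->clear_mask and then sets
 * cdata->set_mask on a single PTE; it is invoked once for every page in
 * the range being changed.
 */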
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
void *data)
{
struct page_change_data *cdata = data;
pte_t pte = *ptep;
pte = clear_pte_bit(pte, cdata->clear_mask);
pte = set_pte_bit(pte, cdata->set_mask);
set_pte(ptep, pte);
return 0;
}
/*
* This function assumes that the range is mapped with PAGE_SIZE pages.
*/
static int __change_memory_common(unsigned long start, unsigned long size,
pgprot_t set_mask, pgprot_t clear_mask)
{
struct page_change_data data;
int ret;
data.set_mask = set_mask;
data.clear_mask = clear_mask;
ret = apply_to_page_range(&init_mm, start, size, change_page_range,
&data);
flush_tlb_kernel_range(start, start + size);
return ret;
}
static int change_memory_common(unsigned long addr, int numpages,
pgprot_t set_mask, pgprot_t clear_mask)
{
unsigned long start = addr;
unsigned long size = PAGE_SIZE*numpages;
unsigned long end = start + size;
struct vm_struct *area;
if (!PAGE_ALIGNED(addr)) {
start &= PAGE_MASK;
end = start + size;
WARN_ON_ONCE(1);
}
/*
* Kernel VA mappings are always live, and splitting live section
* mappings into page mappings may cause TLB conflicts. This means
* we have to ensure that changing the permission bits of the range
* we are operating on does not result in such splitting.
*
* Let's restrict ourselves to mappings created by vmalloc (or vmap).
* Those are guaranteed to consist entirely of page mappings, and
* splitting is never needed.
*
* So check whether the [addr, addr + size) interval is entirely
* covered by precisely one VM area that has the VM_ALLOC flag set.
*/
area = find_vm_area((void *)addr);
if (!area ||
end > (unsigned long)area->addr + area->size ||
!(area->flags & VM_ALLOC))
return -EINVAL;
if (!numpages)
return 0;
return __change_memory_common(start, size, set_mask, clear_mask);
}
int set_memory_ro(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
__pgprot(PTE_RDONLY),
__pgprot(PTE_WRITE));
}
int set_memory_rw(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
__pgprot(PTE_WRITE),
__pgprot(PTE_RDONLY));
}
int set_memory_nx(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
__pgprot(PTE_PXN),
__pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);
int set_memory_x(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
__pgprot(0),
__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);
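/*
 * A minimal usage sketch (illustrative only, not part of this file): the
 * set_memory_*() helpers expect a page-aligned address inside a single
 * vmalloc/vmap (VM_ALLOC) area, as change_memory_common() above enforces.
 */
#if 0	/* illustrative only */
static void pageattr_example(void)
{
	void *buf = vmalloc(PAGE_SIZE);	/* vmalloc => page mappings */

	if (!buf)
		return;
	set_memory_ro((unsigned long)buf, 1);	/* write-protect it */
	set_memory_rw((unsigned long)buf, 1);	/* restore before freeing */
	vfree(buf);
}
#endif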
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long addr = (unsigned long) page_address(page);
if (enable)
__change_memory_common(addr, PAGE_SIZE * numpages,
__pgprot(PTE_VALID),
__pgprot(0));
else
__change_memory_common(addr, PAGE_SIZE * numpages,
__pgprot(0),
__pgprot(PTE_VALID));
}
#endif
| gpl-2.0 |
RealVNC/Android-kernel-mako-NCM | drivers/staging/prima/CORE/SAP/src/sapModule.c | 387 | 76255 | /*
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*===========================================================================
s a p M o d u l e . C
OVERVIEW:
This software unit holds the implementation of the WLAN SAP module's
functions that provide EXTERNAL APIs. It is also where the global SAP
module context gets initialised.
DEPENDENCIES:
Are listed for each API below.
Copyright (c) 2010 QUALCOMM Incorporated.
All Rights Reserved.
Qualcomm Confidential and Proprietary
===========================================================================*/
/*===========================================================================
EDIT HISTORY FOR FILE
This section contains comments describing changes made to the module.
Notice that changes are listed in reverse chronological order.
when who what, where, why
---------- --- --------------------------------------------------------
03/15/10 SOFTAP team Created module
06/03/10 js Added support to hostapd driven
deauth/disassoc/mic failure
===========================================================================*/
/* $Header$ */
/*----------------------------------------------------------------------------
* Include Files
* -------------------------------------------------------------------------*/
#include "wlan_qct_tl.h"
#include "vos_trace.h"
// Pick up the sme callback registration API
#include "sme_Api.h"
// SAP API header file
#include "sapInternal.h"
#if defined(FEATURE_WLAN_NON_INTEGRATED_SOC)
#include "halInternal.h"
#endif
#include "smeInside.h"
/*----------------------------------------------------------------------------
* Preprocessor Definitions and Constants
* -------------------------------------------------------------------------*/
#define SAP_DEBUG
/*----------------------------------------------------------------------------
* Type Declarations
* -------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------
* Global Data Definitions
* -------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------
* External declarations for global context
* -------------------------------------------------------------------------*/
// No! Get this from VOS.
// The main per-Physical Link (per WLAN association) context.
ptSapContext gpSapCtx = NULL;
/*----------------------------------------------------------------------------
* Static Variable Definitions
* -------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------
* Static Function Declarations and Definitions
* -------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------
* Externalized Function Definitions
* -------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------
* Function Declarations and Documentation
* -------------------------------------------------------------------------*/
/*==========================================================================
FUNCTION WLANSAP_Open
DESCRIPTION
Called at driver initialization (vos_open). SAP will initialize
all its internal resources and will wait for the call to start to
register with the other modules.
DEPENDENCIES
PARAMETERS
IN
pvosGCtx : Pointer to the global vos context; a handle to SAP's
control block can be extracted from its context
RETURN VALUE
The result code associated with performing the operation
VOS_STATUS_E_FAULT: Pointer to SAP cb is NULL ; access would cause a page
fault
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS
WLANSAP_Open
(
v_PVOID_t pvosGCtx
)
{
ptSapContext pSapCtx = NULL;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
VOS_ASSERT(pvosGCtx);
/*------------------------------------------------------------------------
Allocate (and sanity check?!) SAP control block
------------------------------------------------------------------------*/
vos_alloc_context(pvosGCtx, VOS_MODULE_ID_SAP, (v_VOID_t **)&pSapCtx, sizeof(tSapContext));
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
vos_mem_zero(pSapCtx, sizeof(tSapContext));
/*------------------------------------------------------------------------
Clean up SAP control block, initialize all values
------------------------------------------------------------------------*/
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_HIGH, "WLANSAP_Open");
WLANSAP_CleanCB(pSapCtx, 0 /*do not empty*/);
// Setup the "link back" to the VOSS context
pSapCtx->pvosGCtx = pvosGCtx;
// Store a pointer to the SAP context provided by VOSS
gpSapCtx = pSapCtx;
/*------------------------------------------------------------------------
Allocate internal resources
------------------------------------------------------------------------*/
return VOS_STATUS_SUCCESS;
}// WLANSAP_Open
/*==========================================================================
FUNCTION WLANSAP_Start
DESCRIPTION
Called as part of the overall start procedure (vos_start). SAP will
use this call to register with TL as the SAP entity for
SAP RSN frames.
DEPENDENCIES
PARAMETERS
IN
pvosGCtx : Pointer to the global vos context; a handle to SAP's
control block can be extracted from its context
RETURN VALUE
The result code associated with performing the operation
VOS_STATUS_E_FAULT: Pointer to SAP cb is NULL ; access would cause a page
fault
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS
WLANSAP_Start
(
v_PVOID_t pvosGCtx
)
{
#ifdef WLAN_SOFTAP_FEATURE
ptSapContext pSapCtx = NULL;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_HIGH,
"WLANSAP_Start invoked successfully\n");
/*------------------------------------------------------------------------
Sanity check
Extract SAP control block
------------------------------------------------------------------------*/
pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
if ( NULL == pSapCtx )
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
/*------------------------------------------------------------------------
For now, presume security is not enabled.
-----------------------------------------------------------------------*/
pSapCtx->ucSecEnabled = WLANSAP_SECURITY_ENABLED_STATE;
/*------------------------------------------------------------------------
Now configure the roaming profile links. To SSID and bssid.
------------------------------------------------------------------------*/
// We have room for two SSIDs.
pSapCtx->csrRoamProfile.SSIDs.numOfSSIDs = 1; // This is true for now.
pSapCtx->csrRoamProfile.SSIDs.SSIDList = pSapCtx->SSIDList; //Array of two
pSapCtx->csrRoamProfile.SSIDs.SSIDList[0].SSID.length = 0;
pSapCtx->csrRoamProfile.SSIDs.SSIDList[0].handoffPermitted = VOS_FALSE;
pSapCtx->csrRoamProfile.SSIDs.SSIDList[0].ssidHidden = pSapCtx->SSIDList[0].ssidHidden;
pSapCtx->csrRoamProfile.BSSIDs.numOfBSSIDs = 1; // This is true for now.
pSapCtx->csrRoamProfile.BSSIDs.bssid = &pSapCtx->bssid;
// Now configure the auth type in the roaming profile. To open.
pSapCtx->csrRoamProfile.negotiatedAuthType = eCSR_AUTH_TYPE_OPEN_SYSTEM; // open is the default
if( !VOS_IS_STATUS_SUCCESS( vos_lock_init( &pSapCtx->SapGlobalLock)))
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"WLANSAP_Start failed init lock\n");
return VOS_STATUS_E_FAULT;
}
#endif
return VOS_STATUS_SUCCESS;
}/* WLANSAP_Start */
/*==========================================================================
FUNCTION WLANSAP_Stop
DESCRIPTION
Called by vos_stop to stop operation in SAP, before close. SAP will
suspend all BT-AMP Protocol Adaptation Layer operation and will wait
for the close request to clean up its resources.
DEPENDENCIES
PARAMETERS
IN
pvosGCtx : Pointer to the global vos context; a handle to SAP's
control block can be extracted from its context
RETURN VALUE
The result code associated with performing the operation
VOS_STATUS_E_FAULT: Pointer to SAP cb is NULL ; access would cause a page
fault
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS
WLANSAP_Stop
(
v_PVOID_t pvosGCtx
)
{
#ifdef WLAN_SOFTAP_FEATURE
ptSapContext pSapCtx = NULL;
/*------------------------------------------------------------------------
Sanity check
Extract SAP control block
------------------------------------------------------------------------*/
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_HIGH,
"WLANSAP_Stop invoked successfully ");
pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
sapFreeRoamProfile(&pSapCtx->csrRoamProfile);
if( !VOS_IS_STATUS_SUCCESS( vos_lock_destroy( &pSapCtx->SapGlobalLock ) ) )
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"WLANSAP_Stop failed destroy lock\n");
return VOS_STATUS_E_FAULT;
}
/*------------------------------------------------------------------------
Stop SAP (de-register RSN handler!?)
------------------------------------------------------------------------*/
#endif
return VOS_STATUS_SUCCESS;
}/* WLANSAP_Stop */
/*==========================================================================
FUNCTION WLANSAP_Close
DESCRIPTION
Called by vos_close during general driver close procedure. SAP will clean up
all the internal resources.
DEPENDENCIES
PARAMETERS
IN
pvosGCtx : Pointer to the global vos context; a handle to SAP's
control block can be extracted from its context
RETURN VALUE
The result code associated with performing the operation
VOS_STATUS_E_FAULT: Pointer to SAP cb is NULL ; access would cause a page
fault
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS
WLANSAP_Close
(
v_PVOID_t pvosGCtx
)
{
#ifdef WLAN_SOFTAP_FEATURE
ptSapContext pSapCtx = NULL;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/*------------------------------------------------------------------------
Sanity check
Extract SAP control block
------------------------------------------------------------------------*/
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_HIGH,
"WLANSAP_Close invoked");
pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
/*------------------------------------------------------------------------
Cleanup SAP control block.
------------------------------------------------------------------------*/
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_HIGH, "WLANSAP_Close");
WLANSAP_CleanCB(pSapCtx, VOS_TRUE /* empty queues/lists/pkts if any*/);
/*------------------------------------------------------------------------
Free SAP context from VOSS global
------------------------------------------------------------------------*/
vos_free_context(pvosGCtx, VOS_MODULE_ID_SAP, pSapCtx);
#endif
return VOS_STATUS_SUCCESS;
}/* WLANSAP_Close */
/*----------------------------------------------------------------------------
* Utility Function implementations
* -------------------------------------------------------------------------*/
/*==========================================================================
FUNCTION WLANSAP_CleanCB
DESCRIPTION
Clear out all fields in the SAP context.
DEPENDENCIES
PARAMETERS
IN
pvosGCtx : Pointer to the global vos context; a handle to SAP's
control block can be extracted from its context
RETURN VALUE
The result code associated with performing the operation
VOS_STATUS_E_FAULT: Pointer to SAP cb is NULL ; access would cause a page
fault
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS
WLANSAP_CleanCB
(
ptSapContext pSapCtx,
v_U32_t freeFlag // 0 == do not empty queues/lists
)
{
#ifdef WLAN_SOFTAP_FEATURE
/*------------------------------------------------------------------------
Sanity check SAP control block
------------------------------------------------------------------------*/
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
/*------------------------------------------------------------------------
Clean up SAP control block, initialize all values
------------------------------------------------------------------------*/
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_HIGH, "WLANSAP_CleanCB");
vos_mem_zero( pSapCtx, sizeof(tSapContext));
pSapCtx->pvosGCtx = NULL;
pSapCtx->sapsMachine= eSAP_DISCONNECTED;
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: Initializing State: %d, sapContext value = %x",
__FUNCTION__, pSapCtx->sapsMachine, pSapCtx);
pSapCtx->sessionId = 0;
pSapCtx->channel = 0;
#endif
return VOS_STATUS_SUCCESS;
}// WLANSAP_CleanCB
/*==========================================================================
FUNCTION WLANSAP_pmcFullPwrReqCB
DESCRIPTION
Callback provide to PMC in the pmcRequestFullPower API.
DEPENDENCIES
PARAMETERS
IN
callbackContext: The user passed in a context to identify
status: The halStatus
RETURN VALUE
None
SIDE EFFECTS
============================================================================*/
void
WLANSAP_pmcFullPwrReqCB
(
void *callbackContext,
eHalStatus status
)
{
if(HAL_STATUS_SUCCESS(status))
{
//On success, nothing further needs to be handled for now
}
else
{
VOS_TRACE(VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_FATAL,
"WLANSAP_pmcFullPwrReqCB: PMC failed to put the chip in Full power\n");
}
}// WLANSAP_pmcFullPwrReqCB
/*==========================================================================
FUNCTION WLANSAP_getState
DESCRIPTION
This api returns the current SAP state to the caller.
DEPENDENCIES
PARAMETERS
IN
pContext : Pointer to Sap Context structure
RETURN VALUE
Returns the SAP FSM state.
============================================================================*/
v_U8_t WLANSAP_getState
(
v_PVOID_t pvosGCtx
)
{
ptSapContext pSapCtx = NULL;
pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
if ( NULL == pSapCtx )
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_HIGH,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
return pSapCtx->sapsMachine;
}
/*==========================================================================
FUNCTION WLANSAP_StartBss
DESCRIPTION
This api function provides SAP FSM event eWLAN_SAP_PHYSICAL_LINK_CREATE for
starting AP BSS
DEPENDENCIES
PARAMETERS
IN
pContext : Pointer to Sap Context structure
pQctCommitConfig : Pointer to configuration structure passed down from HDD(HostApd for Android)
hdd_SapEventCallback: Callback function in HDD called by SAP to inform HDD about SAP results
pUsrContext : Parameter that will be passed back in all the SAP callback events.
RETURN VALUE
The result code associated with performing the operation
VOS_STATUS_E_FAULT: Pointer to SAP cb is NULL ; access would cause a page
fault
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS
WLANSAP_StartBss
(
v_PVOID_t pvosGCtx,//pwextCtx
tpWLAN_SAPEventCB pSapEventCallback,
tsap_Config_t *pConfig,
v_PVOID_t pUsrContext
)
{
tWLAN_SAPEvent sapEvent; /* State machine event*/
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
ptSapContext pSapCtx = NULL;
tANI_BOOLEAN restartNeeded;
tHalHandle hHal;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/*------------------------------------------------------------------------
Sanity check
Extract SAP control block
------------------------------------------------------------------------*/
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_HIGH,
"WLANSAP_StartBss");
if (VOS_STA_SAP_MODE == vos_get_conparam ())
{
pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
if ( NULL == pSapCtx )
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_HIGH,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
pSapCtx->sapsMachine = eSAP_DISCONNECTED;
/* Channel selection is auto or configured */
pSapCtx->channel = pConfig->channel;
pSapCtx->pUsrContext = pUsrContext;
//Set the BSSID to the "self MAC addr"; read the MAC address from the configuration item received from HDD
pSapCtx->csrRoamProfile.BSSIDs.numOfBSSIDs = 1;
vos_mem_copy(pSapCtx->csrRoamProfile.BSSIDs.bssid,
pSapCtx->self_mac_addr,
sizeof( tCsrBssid ) );
//Save a copy to SAP context
vos_mem_copy(pSapCtx->csrRoamProfile.BSSIDs.bssid,
pConfig->self_macaddr.bytes, sizeof(v_MACADDR_t));
vos_mem_copy(pSapCtx->self_mac_addr,
pConfig->self_macaddr.bytes, sizeof(v_MACADDR_t));
//copy the configuration items to csrProfile
sapconvertToCsrProfile( pConfig, eCSR_BSS_TYPE_INFRA_AP, &pSapCtx->csrRoamProfile);
hHal = (tHalHandle)VOS_GET_HAL_CB(pvosGCtx);
if (NULL == hHal)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_HIGH,
"%s: Invalid MAC context from pvosGCtx", __FUNCTION__);
}
else
{
//If a concurrent session is running that is already associated
//then we just follow that session's country info (whether
//present or not doesn't matter as we have to follow whatever
//the STA session does)
if (0 == sme_GetConcurrentOperationChannel(hHal))
{
/* Setting the region/country information */
sme_setRegInfo(hHal, pConfig->countryCode);
sme_ResetCountryCodeInformation(hHal, &restartNeeded);
}
}
// Copy MAC filtering settings to sap context
pSapCtx->eSapMacAddrAclMode = pConfig->SapMacaddr_acl;
vos_mem_copy(pSapCtx->acceptMacList, pConfig->accept_mac, sizeof(pConfig->accept_mac));
pSapCtx->nAcceptMac = pConfig->num_accept_mac;
sapSortMacList(pSapCtx->acceptMacList, pSapCtx->nAcceptMac);
vos_mem_copy(pSapCtx->denyMacList, pConfig->deny_mac, sizeof(pConfig->deny_mac));
pSapCtx->nDenyMac = pConfig->num_deny_mac;
sapSortMacList(pSapCtx->denyMacList, pSapCtx->nDenyMac);
/* Fill in the event structure for FSM */
sapEvent.event = eSAP_HDD_START_INFRA_BSS;
sapEvent.params = 0;//pSapPhysLinkCreate
/* Store the HDD callback in SAP context */
pSapCtx->pfnSapEventCallback = pSapEventCallback;
/* Handle event*/
vosStatus = sapFsm(pSapCtx, &sapEvent);
}
else
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"SoftAp role has not been enabled");
}
return vosStatus;
}// WLANSAP_StartBss
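/*
 * A minimal caller sketch (hddSapEventCallback and the helper name are
 * hypothetical; in practice HDD drives this from its hostapd glue):
 */
#if 0 /* illustrative only */
static VOS_STATUS hddStartSapExample(v_PVOID_t pvosGCtx, v_PVOID_t pUsrContext)
{
    tsap_Config_t sapConfig;

    vos_mem_zero(&sapConfig, sizeof(sapConfig));
    sapConfig.channel = 6; /* example: a fixed, pre-selected channel */
    /* hddSapEventCallback: a hypothetical HDD-side tpWLAN_SAPEventCB */
    return WLANSAP_StartBss(pvosGCtx, hddSapEventCallback,
                            &sapConfig, pUsrContext);
}
#endif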
/*==========================================================================
FUNCTION WLANSAP_StopBss
DESCRIPTION
This api function provides SAP FSM event eSAP_HDD_STOP_INFRA_BSS for
stopping AP BSS
DEPENDENCIES
PARAMETERS
IN
pvosGCtx : Pointer to the global vos context; a handle to SAP's
control block can be extracted from its context
RETURN VALUE
The result code associated with performing the operation
VOS_STATUS_E_FAULT: Pointer to VOSS GC is NULL ; access would cause a page
fault
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS
WLANSAP_StopBss
(
v_PVOID_t pvosGCtx
)
{
tWLAN_SAPEvent sapEvent; /* State machine event*/
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
ptSapContext pSapCtx = NULL;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/*------------------------------------------------------------------------
Sanity check
Extract SAP control block
------------------------------------------------------------------------*/
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_HIGH,
"WLANSAP_StopBss");
if ( NULL == pvosGCtx )
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid Global VOSS handle", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
if (NULL == pSapCtx )
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
/* Fill in the event structure for FSM */
sapEvent.event = eSAP_HDD_STOP_INFRA_BSS;
sapEvent.params = 0;
/* Handle event*/
vosStatus = sapFsm(pSapCtx, &sapEvent);
return vosStatus;
}
/*==========================================================================
FUNCTION WLANSAP_GetAssocStations
DESCRIPTION
This api function is used to probe the list of associated stations from various modules of the CORE stack
DEPENDENCIES
NA.
PARAMETERS
IN
pvosGCtx : Pointer to vos global context structure
modId : Module from which the list of associated stations is to be probed. If an invalid module is passed
then by default VOS_MODULE_ID_PE will be probed
IN/OUT
pAssocStas : Pointer to list of associated stations that are known to the module specified in mod parameter
NOTE: The memory for this list will be allocated by the caller of this API
RETURN VALUE
The VOS_STATUS code associated with performing the operation
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS
WLANSAP_GetAssocStations
(
v_PVOID_t pvosGCtx,
VOS_MODULE_ID modId,
tpSap_AssocMacAddr pAssocStas
)
{
ptSapContext pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
/*------------------------------------------------------------------------
Sanity check
Extract SAP control block
------------------------------------------------------------------------*/
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
sme_RoamGetAssociatedStas( VOS_GET_HAL_CB(pSapCtx->pvosGCtx), pSapCtx->sessionId,
modId,
pSapCtx->pUsrContext,
(v_PVOID_t *)pSapCtx->pfnSapEventCallback,
(v_U8_t *)pAssocStas );
return VOS_STATUS_SUCCESS;
}
/*==========================================================================
FUNCTION WLANSAP_RemoveWpsSessionOverlap
DESCRIPTION
This api function provides for Ap App/HDD to remove an entry from the WPS session overlap info.
DEPENDENCIES
NA.
PARAMETERS
IN
pvosGCtx: Pointer to vos global context structure
pRemoveMac: pointer to v_MACADDR_t for session MAC address
RETURN VALUE
The VOS_STATUS code associated with performing the operation
VOS_STATUS_SUCCESS: Success
VOS_STATUS_E_FAULT: Session is not detected. The function parameter is not valid.
SIDE EFFECTS
============================================================================*/
VOS_STATUS
WLANSAP_RemoveWpsSessionOverlap
(
v_PVOID_t pvosGCtx,
v_MACADDR_t pRemoveMac
)
{
ptSapContext pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
/*------------------------------------------------------------------------
Sanity check
Extract SAP control block
------------------------------------------------------------------------*/
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
sme_RoamGetWpsSessionOverlap( VOS_GET_HAL_CB(pSapCtx->pvosGCtx), pSapCtx->sessionId,
pSapCtx->pUsrContext,
(v_PVOID_t *)pSapCtx->pfnSapEventCallback,
pRemoveMac);
return VOS_STATUS_SUCCESS;
}
/*==========================================================================
FUNCTION WLANSAP_getWpsSessionOverlap
DESCRIPTION
This api function provides for Ap App/HDD to get WPS session overlap info.
DEPENDENCIES
NA.
PARAMETERS
IN
pvosGCtx: Pointer to vos global context structure
RETURN VALUE
The VOS_STATUS code associated with performing the operation
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS
WLANSAP_getWpsSessionOverlap
(
v_PVOID_t pvosGCtx
)
{
v_MACADDR_t pRemoveMac = VOS_MAC_ADDR_ZERO_INITIALIZER;
ptSapContext pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
/*------------------------------------------------------------------------
Sanity check
Extract SAP control block
------------------------------------------------------------------------*/
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
sme_RoamGetWpsSessionOverlap( VOS_GET_HAL_CB(pSapCtx->pvosGCtx), pSapCtx->sessionId,
pSapCtx->pUsrContext,
(v_PVOID_t *)pSapCtx->pfnSapEventCallback,
pRemoveMac);
return VOS_STATUS_SUCCESS;
}
/* This routine will set the mode of operation for ACL dynamically*/
VOS_STATUS
WLANSAP_SetMode ( v_PVOID_t pvosGCtx, v_U32_t mode)
{
ptSapContext pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
pSapCtx->eSapMacAddrAclMode = (eSapMacAddrACL)mode;
return VOS_STATUS_SUCCESS;
}
/* This routine will clear all the entries in accept list as well as deny list */
VOS_STATUS
WLANSAP_ClearACL( v_PVOID_t pvosGCtx)
{
ptSapContext pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
v_U8_t i;
if (NULL == pSapCtx)
{
return VOS_STATUS_E_RESOURCES;
}
if (pSapCtx->denyMacList != NULL)
{
for (i = 0; i < pSapCtx->nDenyMac; i++)
{
vos_mem_zero((pSapCtx->denyMacList+i)->bytes, sizeof(v_MACADDR_t));
}
}
sapPrintACL(pSapCtx->denyMacList, pSapCtx->nDenyMac);
pSapCtx->nDenyMac = 0;
if (pSapCtx->acceptMacList!=NULL)
{
for (i = 0; i < pSapCtx->nAcceptMac; i++)
{
vos_mem_zero((pSapCtx->acceptMacList+i)->bytes, sizeof(v_MACADDR_t));
}
}
sapPrintACL(pSapCtx->acceptMacList, pSapCtx->nAcceptMac);
pSapCtx->nAcceptMac = 0;
return VOS_STATUS_SUCCESS;
}
VOS_STATUS
WLANSAP_ModifyACL
(
v_PVOID_t pvosGCtx,
v_U8_t *pPeerStaMac,
eSapACLType listType,
eSapACLCmdType cmd
)
{
eSapBool staInWhiteList=eSAP_FALSE, staInBlackList=eSAP_FALSE;
v_U8_t staWLIndex, staBLIndex;
ptSapContext pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP Context", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_LOW,"Modify ACL entered\n"
"Before modification of ACL\n"
"size of accept and deny lists %d %d",
pSapCtx->nAcceptMac, pSapCtx->nDenyMac);
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_HIGH,"*** WHITE LIST ***");
sapPrintACL(pSapCtx->acceptMacList, pSapCtx->nAcceptMac);
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_HIGH,"*** BLACK LIST ***");
sapPrintACL(pSapCtx->denyMacList, pSapCtx->nDenyMac);
/* the expectation is a mac addr will not be in both the lists at the same time.
It is the responsibility of userspace to ensure this */
staInWhiteList = sapSearchMacList(pSapCtx->acceptMacList, pSapCtx->nAcceptMac, pPeerStaMac, &staWLIndex);
staInBlackList = sapSearchMacList(pSapCtx->denyMacList, pSapCtx->nDenyMac, pPeerStaMac, &staBLIndex);
if (staInWhiteList && staInBlackList)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"Peer mac %02x:%02x:%02x:%02x:%02x:%02x found in white and black lists."
"Initial lists passed incorrect. Cannot execute this command.",
pPeerStaMac[0], pPeerStaMac[1], pPeerStaMac[2], pPeerStaMac[3],
pPeerStaMac[4], pPeerStaMac[5]);
return VOS_STATUS_E_FAILURE;
}
switch(listType)
{
case eSAP_WHITE_LIST:
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_LOW, "cmd %d", cmd);
if (cmd == ADD_STA_TO_ACL)
{
//error check
// if list is already at max, return failure
if (pSapCtx->nAcceptMac == MAX_MAC_ADDRESS_ACCEPTED)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"White list is already maxed out. Cannot accept %02x:%02x:%02x:%02x:%02x:%02x",
pPeerStaMac[0], pPeerStaMac[1], pPeerStaMac[2], pPeerStaMac[3],
pPeerStaMac[4], pPeerStaMac[5]);
return VOS_STATUS_E_FAILURE;
}
if (staInWhiteList)
{
//Do nothing if already present in white list. Just print a warning
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_WARN,
"MAC address already present in white list %02x:%02x:%02x:%02x:%02x:%02x",
pPeerStaMac[0], pPeerStaMac[1], pPeerStaMac[2], pPeerStaMac[3],
pPeerStaMac[4], pPeerStaMac[5]);
} else
{
if (staInBlackList)
{
//remove it from black list before adding to the white list
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_WARN,
"STA present in black list so first remove from it");
sapRemoveMacFromACL(pSapCtx->denyMacList, &pSapCtx->nDenyMac, staBLIndex);
}
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO,
"... Now add to the white list");
sapAddMacToACL(pSapCtx->acceptMacList, &pSapCtx->nAcceptMac, pPeerStaMac);
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_LOW, "size of accept and deny lists %d %d",
pSapCtx->nAcceptMac, pSapCtx->nDenyMac);
}
}
else if (cmd == DELETE_STA_FROM_ACL)
{
if (staInWhiteList)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO, "Delete from white list");
sapRemoveMacFromACL(pSapCtx->acceptMacList, &pSapCtx->nAcceptMac, staWLIndex);
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_LOW, "size of accept and deny lists %d %d",
pSapCtx->nAcceptMac, pSapCtx->nDenyMac);
}
else
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_WARN,
"MAC address to be deleted is not present in the white list %02x:%02x:%02x:%02x:%02x:%02x",
pPeerStaMac[0], pPeerStaMac[1], pPeerStaMac[2], pPeerStaMac[3],
pPeerStaMac[4], pPeerStaMac[5]);
return VOS_STATUS_E_FAILURE;
}
}
else
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR, "Invalid cmd type passed");
return VOS_STATUS_E_FAILURE;
}
break;
case eSAP_BLACK_LIST:
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_LOW,
"cmd %d", cmd);
if (cmd == ADD_STA_TO_ACL)
{
//error check
// if list is already at max, return failure
if (pSapCtx->nDenyMac == MAX_MAC_ADDRESS_ACCEPTED)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"Black list is already maxed out. Cannot accept %02x:%02x:%02x:%02x:%02x:%02x",
pPeerStaMac[0], pPeerStaMac[1], pPeerStaMac[2], pPeerStaMac[3],
pPeerStaMac[4], pPeerStaMac[5]);
return VOS_STATUS_E_FAILURE;
}
if (staInBlackList)
{
//Do nothing if already present in black list. Just print a warning
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_WARN,
"MAC address already present in black list %02x:%02x:%02x:%02x:%02x:%02x",
pPeerStaMac[0], pPeerStaMac[1], pPeerStaMac[2], pPeerStaMac[3],
pPeerStaMac[4], pPeerStaMac[5]);
} else
{
if (staInWhiteList)
{
//remove it from the white list before adding to the black list
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_WARN,
"Present in white list so first remove from it");
sapRemoveMacFromACL(pSapCtx->acceptMacList, &pSapCtx->nAcceptMac, staWLIndex);
}
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO,
"... Now add to black list");
sapAddMacToACL(pSapCtx->denyMacList, &pSapCtx->nDenyMac, pPeerStaMac);
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_LOW,"size of accept and deny lists %d %d",
pSapCtx->nAcceptMac, pSapCtx->nDenyMac);
}
}
else if (cmd == DELETE_STA_FROM_ACL)
{
if (staInBlackList)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO, "Delete from black list");
sapRemoveMacFromACL(pSapCtx->denyMacList, &pSapCtx->nDenyMac, staBLIndex);
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_LOW,"no accept and deny mac %d %d",
pSapCtx->nAcceptMac, pSapCtx->nDenyMac);
}
else
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_WARN,
"MAC address to be deleted is not present in the black list %02x:%02x:%02x:%02x:%02x:%02x",
pPeerStaMac[0], pPeerStaMac[1], pPeerStaMac[2], pPeerStaMac[3],
pPeerStaMac[4], pPeerStaMac[5]);
return VOS_STATUS_E_FAILURE;
}
}
else
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR, "Invalid cmd type passed");
return VOS_STATUS_E_FAILURE;
}
break;
default:
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"Invalid list type passed %d",listType);
return VOS_STATUS_E_FAILURE;
}
}
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_LOW,"After modification of ACL");
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_HIGH,"*** WHITE LIST ***");
sapPrintACL(pSapCtx->acceptMacList, pSapCtx->nAcceptMac);
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO_HIGH,"*** BLACK LIST ***");
sapPrintACL(pSapCtx->denyMacList, pSapCtx->nDenyMac);
return VOS_STATUS_SUCCESS;
}
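/* Illustrative sketch (not original driver code): how a caller might move a
 * peer into the white list with the API above. The pvosGCtx handle is assumed
 * to come from the caller; the peer MAC value is purely hypothetical. Note
 * that ADD_STA_TO_ACL first removes the entry from the black list if present,
 * as implemented in WLANSAP_ModifyACL. */
static VOS_STATUS example_whitelist_peer(v_PVOID_t pvosGCtx)
{
    v_U8_t peerMac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

    return WLANSAP_ModifyACL(pvosGCtx, peerMac,
                             eSAP_WHITE_LIST, ADD_STA_TO_ACL);
}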
/*==========================================================================
FUNCTION WLANSAP_DisassocSta
DESCRIPTION
This api function provides for Ap App/HDD initiated disassociation of station
DEPENDENCIES
NA.
PARAMETERS
IN
pvosGCtx : Pointer to vos global context structure
pPeerStaMac : Mac address of the station to disassociate
RETURN VALUE
The VOS_STATUS code associated with performing the operation
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS
WLANSAP_DisassocSta
(
v_PVOID_t pvosGCtx,
v_U8_t *pPeerStaMac
)
{
ptSapContext pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
/*------------------------------------------------------------------------
Sanity check
Extract SAP control block
------------------------------------------------------------------------*/
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
sme_RoamDisconnectSta(VOS_GET_HAL_CB(pSapCtx->pvosGCtx), pSapCtx->sessionId,
pPeerStaMac);
return VOS_STATUS_SUCCESS;
}
#ifdef WLAN_SOFTAP_FEATURE
/*==========================================================================
FUNCTION WLANSAP_DeauthSta
DESCRIPTION
This api function provides for Ap App/HDD initiated deauthentication of station
DEPENDENCIES
NA.
PARAMETERS
IN
pvosGCtx : Pointer to vos global context structure
pPeerStaMac : Mac address of the station to deauthenticate
RETURN VALUE
The VOS_STATUS code associated with performing the operation
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS
WLANSAP_DeauthSta
(
v_PVOID_t pvosGCtx,
v_U8_t *pPeerStaMac
)
{
ptSapContext pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
/*------------------------------------------------------------------------
Sanity check
Extract SAP control block
------------------------------------------------------------------------*/
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
sme_RoamDeauthSta(VOS_GET_HAL_CB(pSapCtx->pvosGCtx), pSapCtx->sessionId,
pPeerStaMac);
return VOS_STATUS_SUCCESS;
}
/*==========================================================================
FUNCTION WLANSAP_SetChannelRange
DESCRIPTION
This api function sets the range of channels for AP.
DEPENDENCIES
NA.
PARAMETERS
IN
hHal : HAL handle
startChannel : start channel
endChannel : End channel
operatingBand : Operating band (2.4GHz/5GHz)
RETURN VALUE
The VOS_STATUS code associated with performing the operation
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS
WLANSAP_SetChannelRange(tHalHandle hHal,v_U8_t startChannel, v_U8_t endChannel,
v_U8_t operatingBand)
{
v_U8_t validChannelFlag =0;
v_U8_t loopStartCount =0;
v_U8_t loopEndCount =0;
v_U8_t bandStartChannel =0;
v_U8_t bandEndChannel =0;
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO,
"WLANSAP_SetChannelRange:startChannel %d,EndChannel %d,Operatingband:%d",
startChannel,endChannel,operatingBand);
/*------------------------------------------------------------------------
Sanity check
------------------------------------------------------------------------*/
if (( WNI_CFG_SAP_CHANNEL_SELECT_OPERATING_BAND_APMIN > operatingBand)||
(WNI_CFG_SAP_CHANNEL_SELECT_OPERATING_BAND_APMAX < operatingBand))
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"Invalid operatingBand on WLANSAP_SetChannelRange");
return VOS_STATUS_E_FAULT;
}
if (( WNI_CFG_SAP_CHANNEL_SELECT_START_CHANNEL_APMIN > startChannel)||
(WNI_CFG_SAP_CHANNEL_SELECT_START_CHANNEL_APMAX < startChannel))
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"Invalid startChannel value on WLANSAP_SetChannelRange");
return VOS_STATUS_E_FAULT;
}
if (( WNI_CFG_SAP_CHANNEL_SELECT_END_CHANNEL_APMIN > endChannel)||
(WNI_CFG_SAP_CHANNEL_SELECT_END_CHANNEL_APMAX < endChannel))
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"Invalid endChannel value on WLANSAP_SetChannelRange");
return VOS_STATUS_E_FAULT;
}
switch(operatingBand)
{
case RF_SUBBAND_2_4_GHZ:
bandStartChannel = RF_CHAN_1;
bandEndChannel = RF_CHAN_14;
break;
case RF_SUBBAND_5_LOW_GHZ:
bandStartChannel = RF_CHAN_36;
bandEndChannel = RF_CHAN_64;
break;
case RF_SUBBAND_5_MID_GHZ:
bandStartChannel = RF_CHAN_100;
bandEndChannel = RF_CHAN_140;
break;
case RF_SUBBAND_5_HIGH_GHZ:
bandStartChannel = RF_CHAN_149;
bandEndChannel = RF_CHAN_165;
break;
case RF_SUBBAND_4_9_GHZ:
bandStartChannel = RF_CHAN_240;
bandEndChannel = RF_CHAN_216;
break;
default:
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"Invalid operatingBand value on WLANSAP_SetChannelRange");
break;
}
/* Validating the start channel is in range or not*/
for(loopStartCount = bandStartChannel ; loopStartCount <= bandEndChannel ;
loopStartCount++)
{
if(rfChannels[loopStartCount].channelNum == startChannel )
{
/* start channel is in the range */
break;
}
}
/* Validating the End channel is in range or not*/
for(loopEndCount = bandStartChannel ; loopEndCount <= bandEndChannel ;
loopEndCount++)
{
if(rfChannels[loopEndCount].channelNum == endChannel )
{
/* End channel is in the range */
break;
}
}
if((loopStartCount > bandEndChannel)||(loopEndCount > bandEndChannel))
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid startChannel-%d or EndChannel-%d for band -%d",
__FUNCTION__,startChannel,endChannel,operatingBand);
/* Supplied channels are not in the operating band so set the default
channels for the given operating band */
startChannel = rfChannels[bandStartChannel].channelNum;
endChannel = rfChannels[bandEndChannel].channelNum;
}
/*Search for the Active channels in the given range */
for( loopStartCount = bandStartChannel; loopStartCount <= bandEndChannel; loopStartCount++ )
{
if((startChannel <= rfChannels[loopStartCount].channelNum)&&
(endChannel >= rfChannels[loopStartCount].channelNum ))
{
if( regChannels[loopStartCount].enabled )
{
validChannelFlag = 1;
break;
}
}
}
if(0 == validChannelFlag)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s-No active channels present in the given range for the current region",
__FUNCTION__);
/* There is no active channel in the supplied range. Updating the config
with the default channels in the given band so that we can select the best channel in the sub-band */
startChannel = rfChannels[bandStartChannel].channelNum;
endChannel = rfChannels[bandEndChannel].channelNum;
}
if (ccmCfgSetInt(hHal, WNI_CFG_SAP_CHANNEL_SELECT_OPERATING_BAND,
operatingBand, NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"Could not pass on WNI_CFG_SAP_CHANNEL_SELECT_OPERATING_BAND to CCn");
return VOS_STATUS_E_FAULT;
}
if (ccmCfgSetInt(hHal, WNI_CFG_SAP_CHANNEL_SELECT_START_CHANNEL,
startChannel, NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"Could not pass on WNI_CFG_SAP_CHANNEL_SELECT_START_CHANNEL to CCM");
return VOS_STATUS_E_FAULT;
}
if (ccmCfgSetInt(hHal, WNI_CFG_SAP_CHANNEL_SELECT_END_CHANNEL,
endChannel, NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"Could not pass on WNI_CFG_SAP_CHANNEL_SELECT_START_CHANNEL to CCM");
return VOS_STATUS_E_FAULT;
}
return VOS_STATUS_SUCCESS;
}
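/* Illustrative sketch (assumption, not original code): programming a typical
 * 2.4 GHz range of channels 1-11, assuming those values fall inside the
 * WNI_CFG_SAP_CHANNEL_SELECT_* bounds checked above. Out-of-band values are
 * rejected or defaulted by WLANSAP_SetChannelRange itself. */
static VOS_STATUS example_set_24ghz_range(tHalHandle hHal)
{
    return WLANSAP_SetChannelRange(hHal, 1, 11, RF_SUBBAND_2_4_GHZ);
}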
#endif
/*==========================================================================
FUNCTION WLANSAP_SetCounterMeasure
DESCRIPTION
This api function is used to disassociate all the stations and prevent
association for any other station. Whenever the Authenticator receives two MIC
failures within 60 seconds, the Authenticator will enable the counter measure at
the SAP layer and start a 60-second timer. The core stack will not allow any
STA to associate till HDD disables the counter measure. The core stack shall kick out all the
STAs which are currently associated and a DISASSOC event will be propagated to HDD for
each STA to clean up the HDD STA table. Once the 60-second timer expires, the Authenticator
will disable the counter measure at the core stack. The core stack can then allow STAs to associate.
DEPENDENCIES
NA.
PARAMETERS
IN
pvosGCtx: Pointer to vos global context structure
bEnable: If TRUE then all stations will be disassociated and no more will be allowed to associate. If FALSE then CORE
will come out of this state.
RETURN VALUE
The VOS_STATUS code associated with performing the operation
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS
WLANSAP_SetCounterMeasure
(
v_PVOID_t pvosGCtx,
v_BOOL_t bEnable
)
{
ptSapContext pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
/*------------------------------------------------------------------------
Sanity check
Extract SAP control block
------------------------------------------------------------------------*/
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
sme_RoamTKIPCounterMeasures(VOS_GET_HAL_CB(pSapCtx->pvosGCtx), pSapCtx->sessionId, bEnable);
return VOS_STATUS_SUCCESS;
}
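/* Illustrative sketch (assumption, not original code): the sequence implied
 * by the description above for the HDD authenticator. The 60-second timer is
 * only indicated by comments; a real implementation would arm an actual
 * timer and call the disable path from its expiry handler. */
static void example_tkip_countermeasure(v_PVOID_t pvosGCtx)
{
    /* Second MIC failure within 60 seconds: disassociate all STAs and
     * block new associations. */
    WLANSAP_SetCounterMeasure(pvosGCtx, VOS_TRUE);

    /* ... arm a 60-second timer here ... */

    /* On timer expiry: allow associations again. */
    WLANSAP_SetCounterMeasure(pvosGCtx, VOS_FALSE);
}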
/*==========================================================================
FUNCTION WLANSAP_SetKeysSta
DESCRIPTION
This api function provides for Ap App/HDD to set key for a station.
DEPENDENCIES
NA.
PARAMETERS
IN
pvosGCtx: Pointer to vos global context structure
pSetKeyInfo: tCsrRoamSetKey structure for the station
RETURN VALUE
The VOS_STATUS code associated with performing the operation
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS
WLANSAP_SetKeySta
(
v_PVOID_t pvosGCtx, tCsrRoamSetKey *pSetKeyInfo
)
{
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
ptSapContext pSapCtx = NULL;
v_PVOID_t hHal = NULL;
eHalStatus halStatus = eHAL_STATUS_FAILURE;
v_U32_t roamId=0xFF;
if (VOS_STA_SAP_MODE == vos_get_conparam ( ))
{
pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
hHal = VOS_GET_HAL_CB(pSapCtx->pvosGCtx);
if (NULL == hHal)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid HAL pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
halStatus = sme_RoamSetKey(hHal, pSapCtx->sessionId, pSetKeyInfo, &roamId);
if (halStatus == eHAL_STATUS_SUCCESS)
{
vosStatus = VOS_STATUS_SUCCESS;
} else
{
vosStatus = VOS_STATUS_E_FAULT;
}
}
else
vosStatus = VOS_STATUS_E_FAULT;
return vosStatus;
}
/*==========================================================================
FUNCTION WLANSAP_DelKeySta
DESCRIPTION
This api function provides for Ap App/HDD to delete key for a station.
DEPENDENCIES
NA.
PARAMETERS
IN
pvosGCtx: Pointer to vos global context structure
pRemoveKeyInfo: tCsrRoamRemoveKey structure for the station
RETURN VALUE
The VOS_STATUS code associated with performing the operation
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS
WLANSAP_DelKeySta
(
v_PVOID_t pvosGCtx,
tCsrRoamRemoveKey *pRemoveKeyInfo
)
{
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
ptSapContext pSapCtx = NULL;
v_PVOID_t hHal = NULL;
eHalStatus halStatus = eHAL_STATUS_FAILURE;
v_U32_t roamId=0xFF;
tCsrRoamRemoveKey RemoveKeyInfo;
if (VOS_STA_SAP_MODE == vos_get_conparam ( ))
{
pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
hHal = VOS_GET_HAL_CB(pSapCtx->pvosGCtx);
if (NULL == hHal)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid HAL pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
vos_mem_zero(&RemoveKeyInfo, sizeof(RemoveKeyInfo));
RemoveKeyInfo.encType = pRemoveKeyInfo->encType;
vos_mem_copy(RemoveKeyInfo.peerMac, pRemoveKeyInfo->peerMac, WNI_CFG_BSSID_LEN);
RemoveKeyInfo.keyId = pRemoveKeyInfo->keyId;
halStatus = sme_RoamRemoveKey(hHal, pSapCtx->sessionId, &RemoveKeyInfo, &roamId);
if (HAL_STATUS_SUCCESS(halStatus))
{
vosStatus = VOS_STATUS_SUCCESS;
}
else
{
vosStatus = VOS_STATUS_E_FAULT;
}
}
else
vosStatus = VOS_STATUS_E_FAULT;
return vosStatus;
}
VOS_STATUS
WLANSap_getstationIE_information(v_PVOID_t pvosGCtx,
v_U32_t *pLen,
v_U8_t *pBuf)
{
VOS_STATUS vosStatus = VOS_STATUS_E_FAILURE;
ptSapContext pSapCtx = NULL;
v_U32_t len = 0;
if (VOS_STA_SAP_MODE == vos_get_conparam ( )){
pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
if (pLen)
{
len = *pLen;
*pLen = pSapCtx->nStaWPARSnReqIeLength;
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO,
"%s: WPAIE len : %x", __FUNCTION__, *pLen);
if(pBuf)
{
if(len >= pSapCtx->nStaWPARSnReqIeLength)
{
vos_mem_copy( pBuf, pSapCtx->pStaWpaRsnReqIE, pSapCtx->nStaWPARSnReqIeLength);
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO,
"%s: WPAIE: %02x:%02x:%02x:%02x:%02x:%02x",
__FUNCTION__,
pBuf[0], pBuf[1], pBuf[2],
pBuf[3], pBuf[4], pBuf[5]);
vosStatus = VOS_STATUS_SUCCESS;
}
}
}
}
if( VOS_STATUS_E_FAILURE == vosStatus)
{
VOS_TRACE(VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Error unable to populate the RSNWPAIE",
__FUNCTION__);
}
return vosStatus;
}
/*==========================================================================
FUNCTION WLANSAP_Set_WpsIe
DESCRIPTION
This api function provides for Ap App/HDD to set WPS IE.
DEPENDENCIES
NA.
PARAMETERS
IN
pvosGCtx: Pointer to vos global context structure
pWPSIE: tSap_WPSIE structure that include WPS IEs
RETURN VALUE
The VOS_STATUS code associated with performing the operation
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS
WLANSAP_Set_WpsIe
(
v_PVOID_t pvosGCtx, tSap_WPSIE *pSap_WPSIe
)
{
ptSapContext pSapCtx = NULL;
v_PVOID_t hHal = NULL;
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO,
"%s, %d", __FUNCTION__, __LINE__);
if(VOS_STA_SAP_MODE == vos_get_conparam ( )) {
pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
if ( NULL == pSapCtx )
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
hHal = VOS_GET_HAL_CB(pSapCtx->pvosGCtx);
if ( NULL == hHal ){
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid HAL pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
if ( sap_AcquireGlobalLock( pSapCtx ) == VOS_STATUS_SUCCESS )
{
if (pSap_WPSIe->sapWPSIECode == eSAP_WPS_BEACON_IE)
{
vos_mem_copy(&pSapCtx->APWPSIEs.SirWPSBeaconIE, &pSap_WPSIe->sapwpsie.sapWPSBeaconIE, sizeof(tSap_WPSBeaconIE));
}
else if (pSap_WPSIe->sapWPSIECode == eSAP_WPS_PROBE_RSP_IE)
{
vos_mem_copy(&pSapCtx->APWPSIEs.SirWPSProbeRspIE, &pSap_WPSIe->sapwpsie.sapWPSProbeRspIE, sizeof(tSap_WPSProbeRspIE));
}
else
{
sap_ReleaseGlobalLock( pSapCtx );
return VOS_STATUS_E_FAULT;
}
sap_ReleaseGlobalLock( pSapCtx );
return VOS_STATUS_SUCCESS;
}
else
return VOS_STATUS_E_FAULT;
}
else
return VOS_STATUS_E_FAULT;
}
/*==========================================================================
FUNCTION WLANSAP_Update_WpsIe
DESCRIPTION
This api function provides for Ap App/HDD to update WPS IEs.
DEPENDENCIES
NA.
PARAMETERS
IN
pvosGCtx: Pointer to vos global context structure
RETURN VALUE
The VOS_STATUS code associated with performing the operation
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS
WLANSAP_Update_WpsIe
(
v_PVOID_t pvosGCtx
)
{
VOS_STATUS vosStatus = VOS_STATUS_E_FAULT;
ptSapContext pSapCtx = NULL;
eHalStatus halStatus = eHAL_STATUS_FAILURE;
v_PVOID_t hHal = NULL;
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s, %d", __FUNCTION__, __LINE__);
if(VOS_STA_SAP_MODE == vos_get_conparam ( )){
pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
if ( NULL == pSapCtx )
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
hHal = VOS_GET_HAL_CB(pSapCtx->pvosGCtx);
if ( NULL == hHal ){
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid HAL pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
halStatus = sme_RoamUpdateAPWPSIE( hHal, pSapCtx->sessionId, &pSapCtx->APWPSIEs);
if(halStatus == eHAL_STATUS_SUCCESS) {
vosStatus = VOS_STATUS_SUCCESS;
} else
{
vosStatus = VOS_STATUS_E_FAULT;
}
}
return vosStatus;
}
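/* Illustrative sketch (assumption): WLANSAP_Set_WpsIe only stores the IEs in
 * the SAP context, while WLANSAP_Update_WpsIe pushes the stored IEs to SME,
 * so a caller would normally use the two back to back. pSap_WPSIe is assumed
 * to be filled in by the caller, with sapWPSIECode already set. */
static VOS_STATUS example_apply_wps_ie(v_PVOID_t pvosGCtx, tSap_WPSIE *pSap_WPSIe)
{
    VOS_STATUS vosStatus = WLANSAP_Set_WpsIe(pvosGCtx, pSap_WPSIe);

    if (VOS_STATUS_SUCCESS != vosStatus)
        return vosStatus;

    return WLANSAP_Update_WpsIe(pvosGCtx);
}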
/*==========================================================================
FUNCTION WLANSAP_Get_WPS_State
DESCRIPTION
This api function provides for Ap App/HDD to check if a WPS session is in progress.
DEPENDENCIES
NA.
PARAMETERS
IN
pvosGCtx: Pointer to vos global context structure
OUT
bWPSState: Pointer to a variable that indicates whether the AP is in the WPS Registration state
RETURN VALUE
The VOS_STATUS code associated with performing the operation
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS
WLANSAP_Get_WPS_State
(
v_PVOID_t pvosGCtx, v_BOOL_t *bWPSState
)
{
ptSapContext pSapCtx = NULL;
v_PVOID_t hHal = NULL;
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_INFO,
"%s, %d", __FUNCTION__, __LINE__);
if(VOS_STA_SAP_MODE == vos_get_conparam ( )){
pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
if ( NULL == pSapCtx )
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
hHal = VOS_GET_HAL_CB(pSapCtx->pvosGCtx);
if ( NULL == hHal ){
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid HAL pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
if ( sap_AcquireGlobalLock(pSapCtx ) == VOS_STATUS_SUCCESS )
{
if(pSapCtx->APWPSIEs.SirWPSProbeRspIE.FieldPresent & SIR_WPS_PROBRSP_SELECTEDREGISTRA_PRESENT)
*bWPSState = eANI_BOOLEAN_TRUE;
else
*bWPSState = eANI_BOOLEAN_FALSE;
sap_ReleaseGlobalLock( pSapCtx );
return VOS_STATUS_SUCCESS;
}
else
return VOS_STATUS_E_FAULT;
}
else
return VOS_STATUS_E_FAULT;
}
VOS_STATUS
sap_AcquireGlobalLock
(
ptSapContext pSapCtx
)
{
VOS_STATUS vosStatus = VOS_STATUS_E_FAULT;
if( VOS_IS_STATUS_SUCCESS( vos_lock_acquire( &pSapCtx->SapGlobalLock) ) )
{
vosStatus = VOS_STATUS_SUCCESS;
}
return (vosStatus);
}
VOS_STATUS
sap_ReleaseGlobalLock
(
ptSapContext pSapCtx
)
{
VOS_STATUS vosStatus = VOS_STATUS_E_FAULT;
if( VOS_IS_STATUS_SUCCESS( vos_lock_release( &pSapCtx->SapGlobalLock) ) )
{
vosStatus = VOS_STATUS_SUCCESS;
}
return (vosStatus);
}
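/* Illustrative sketch (assumption): the acquire/check/release pattern the two
 * helpers above are used with elsewhere in this file (see WLANSAP_Set_WpsIe
 * and WLANSAP_Get_WPS_State). pSapCtx is assumed to be a valid SAP context. */
static VOS_STATUS example_locked_access(ptSapContext pSapCtx)
{
    if (sap_AcquireGlobalLock(pSapCtx) != VOS_STATUS_SUCCESS)
        return VOS_STATUS_E_FAULT;

    /* ... access fields guarded by SapGlobalLock, e.g. APWPSIEs ... */

    sap_ReleaseGlobalLock(pSapCtx);
    return VOS_STATUS_SUCCESS;
}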
/*==========================================================================
FUNCTION WLANSAP_Set_WPARSNIes
DESCRIPTION
This api function provides for Ap App/HDD to set AP WPA and RSN IE in its beacon and probe response.
DEPENDENCIES
NA.
PARAMETERS
IN
pvosGCtx: Pointer to vos global context structure
pWPARSNIEs: buffer to the WPA/RSN IEs
WPARSNIEsLen: length of WPA/RSN IEs
RETURN VALUE
The VOS_STATUS code associated with performing the operation
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS WLANSAP_Set_WPARSNIes(v_PVOID_t pvosGCtx, v_U8_t *pWPARSNIEs, v_U32_t WPARSNIEsLen)
{
ptSapContext pSapCtx = NULL;
eHalStatus halStatus = eHAL_STATUS_FAILURE;
v_PVOID_t hHal = NULL;
if(VOS_STA_SAP_MODE == vos_get_conparam ( )){
pSapCtx = VOS_GET_SAP_CB(pvosGCtx);
if ( NULL == pSapCtx )
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
hHal = VOS_GET_HAL_CB(pSapCtx->pvosGCtx);
if ( NULL == hHal ){
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid HAL pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
pSapCtx->APWPARSNIEs.length = (tANI_U16)WPARSNIEsLen;
vos_mem_copy(pSapCtx->APWPARSNIEs.rsnIEdata, pWPARSNIEs, WPARSNIEsLen);
halStatus = sme_RoamUpdateAPWPARSNIEs( hHal, pSapCtx->sessionId, &pSapCtx->APWPARSNIEs);
if(halStatus == eHAL_STATUS_SUCCESS) {
return VOS_STATUS_SUCCESS;
} else
{
return VOS_STATUS_E_FAULT;
}
}
return VOS_STATUS_E_FAULT;
}
VOS_STATUS WLANSAP_GetStatistics(v_PVOID_t pvosGCtx, tSap_SoftapStats *statBuf, v_BOOL_t bReset)
{
if (NULL == pvosGCtx)
{
return VOS_STATUS_E_FAULT;
}
return (WLANTL_GetSoftAPStatistics(pvosGCtx, statBuf, bReset));
}
#ifdef WLAN_FEATURE_P2P
/*==========================================================================
FUNCTION WLANSAP_SendAction
DESCRIPTION
This api function is used to send an action frame supplied by the upper layer.
DEPENDENCIES
NA.
PARAMETERS
IN
pvosGCtx: Pointer to vos global context structure
pBuf: Pointer of the action frame to be transmitted
len: Length of the action frame
RETURN VALUE
The VOS_STATUS code associated with performing the operation
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS WLANSAP_SendAction( v_PVOID_t pvosGCtx, const tANI_U8 *pBuf,
tANI_U32 len, tANI_U16 wait )
{
ptSapContext pSapCtx = NULL;
v_PVOID_t hHal = NULL;
eHalStatus halStatus = eHAL_STATUS_FAILURE;
if( VOS_STA_SAP_MODE == vos_get_conparam ( ) )
{
pSapCtx = VOS_GET_SAP_CB( pvosGCtx );
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
hHal = VOS_GET_HAL_CB(pSapCtx->pvosGCtx);
if( ( NULL == hHal ) || ( eSAP_TRUE != pSapCtx->isSapSessionOpen ) )
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: HAL pointer (%p) NULL OR SME session is not open (%d)",
__FUNCTION__, hHal, pSapCtx->isSapSessionOpen );
return VOS_STATUS_E_FAULT;
}
halStatus = sme_sendAction( hHal, pSapCtx->sessionId, pBuf, len, 0 , 0);
if ( eHAL_STATUS_SUCCESS == halStatus )
{
return VOS_STATUS_SUCCESS;
}
}
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"Failed to Send Action Frame");
return VOS_STATUS_E_FAULT;
}
/*==========================================================================
FUNCTION WLANSAP_RemainOnChannel
DESCRIPTION
This api function sets Remain-On-Channel on the specified channel
for the specified duration.
DEPENDENCIES
NA.
PARAMETERS
IN
pvosGCtx: Pointer to vos global context structure
channel: Channel on which driver has to listen
duration: Duration for which driver has to listen on specified channel
callback: Callback function to be called once Listen is done.
pContext: Context needs to be called in callback function.
RETURN VALUE
The VOS_STATUS code associated with performing the operation
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS WLANSAP_RemainOnChannel( v_PVOID_t pvosGCtx,
tANI_U8 channel, tANI_U32 duration,
remainOnChanCallback callback,
void *pContext )
{
ptSapContext pSapCtx = NULL;
v_PVOID_t hHal = NULL;
eHalStatus halStatus = eHAL_STATUS_FAILURE;
if( VOS_STA_SAP_MODE == vos_get_conparam ( ) )
{
pSapCtx = VOS_GET_SAP_CB( pvosGCtx );
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
hHal = VOS_GET_HAL_CB(pSapCtx->pvosGCtx);
if( ( NULL == hHal ) || ( eSAP_TRUE != pSapCtx->isSapSessionOpen ) )
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: HAL pointer (%p) NULL OR SME session is not open (%d)",
__FUNCTION__, hHal, pSapCtx->isSapSessionOpen );
return VOS_STATUS_E_FAULT;
}
halStatus = sme_RemainOnChannel( hHal, pSapCtx->sessionId,
channel, duration, callback, pContext );
if( eHAL_STATUS_SUCCESS == halStatus )
{
return VOS_STATUS_SUCCESS;
}
}
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"Failed to Set Remain on Channel");
return VOS_STATUS_E_FAULT;
}
/*==========================================================================
FUNCTION WLANSAP_CancelRemainOnChannel
DESCRIPTION
This api function cancels the previous remain-on-channel request.
DEPENDENCIES
NA.
PARAMETERS
IN
pvosGCtx: Pointer to vos global context structure
RETURN VALUE
The VOS_STATUS code associated with performing the operation
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS WLANSAP_CancelRemainOnChannel( v_PVOID_t pvosGCtx )
{
ptSapContext pSapCtx = NULL;
v_PVOID_t hHal = NULL;
eHalStatus halStatus = eHAL_STATUS_FAILURE;
if( VOS_STA_SAP_MODE == vos_get_conparam ( ) )
{
pSapCtx = VOS_GET_SAP_CB( pvosGCtx );
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
hHal = VOS_GET_HAL_CB(pSapCtx->pvosGCtx);
if( ( NULL == hHal ) || ( eSAP_TRUE != pSapCtx->isSapSessionOpen ) )
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: HAL pointer (%p) NULL OR SME session is not open (%d)",
__FUNCTION__, hHal, pSapCtx->isSapSessionOpen );
return VOS_STATUS_E_FAULT;
}
halStatus = sme_CancelRemainOnChannel( hHal, pSapCtx->sessionId );
if( eHAL_STATUS_SUCCESS == halStatus )
{
return VOS_STATUS_SUCCESS;
}
}
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"Failed to Cancel Remain on Channel");
return VOS_STATUS_E_FAULT;
}
/*==========================================================================
FUNCTION WLANSAP_RegisterMgmtFrame
DESCRIPTION
HDD uses this API to register a specified type of frame with the CORE stack.
On receiving such a frame, the CORE stack should pass it up to HDD.
DEPENDENCIES
NA.
PARAMETERS
IN
pvosGCtx: Pointer to vos global context structure
frameType: frameType that needs to be registered with PE.
matchData: Data pointer which should be matched after frame type is matched.
matchLen: Length of the matchData
RETURN VALUE
The VOS_STATUS code associated with performing the operation
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS WLANSAP_RegisterMgmtFrame( v_PVOID_t pvosGCtx, tANI_U16 frameType,
tANI_U8* matchData, tANI_U16 matchLen )
{
ptSapContext pSapCtx = NULL;
v_PVOID_t hHal = NULL;
eHalStatus halStatus = eHAL_STATUS_FAILURE;
if( VOS_STA_SAP_MODE == vos_get_conparam ( ) )
{
pSapCtx = VOS_GET_SAP_CB( pvosGCtx );
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
hHal = VOS_GET_HAL_CB(pSapCtx->pvosGCtx);
if( ( NULL == hHal ) || ( eSAP_TRUE != pSapCtx->isSapSessionOpen ) )
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: HAL pointer (%p) NULL OR SME session is not open (%d)",
__FUNCTION__, hHal, pSapCtx->isSapSessionOpen );
return VOS_STATUS_E_FAULT;
}
halStatus = sme_RegisterMgmtFrame(hHal, pSapCtx->sessionId,
frameType, matchData, matchLen);
if( eHAL_STATUS_SUCCESS == halStatus )
{
return VOS_STATUS_SUCCESS;
}
}
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"Failed to Register MGMT frame");
return VOS_STATUS_E_FAULT;
}
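/* Illustrative sketch (assumption): registering interest in a management
 * frame type with no additional payload matching. The frameType encoding
 * expected by PE is not reproduced here; exampleFrameType is a hypothetical
 * value supplied by the caller. */
static VOS_STATUS example_register_mgmt_frame(v_PVOID_t pvosGCtx,
                                              tANI_U16 exampleFrameType)
{
    /* NULL matchData with matchLen 0: match on the frame type alone. */
    return WLANSAP_RegisterMgmtFrame(pvosGCtx, exampleFrameType, NULL, 0);
}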
/*==========================================================================
FUNCTION WLANSAP_DeRegisterMgmtFrame
DESCRIPTION
This API is used to de-register a previously registered frame.
DEPENDENCIES
NA.
PARAMETERS
IN
pvosGCtx: Pointer to vos global context structure
frameType: frameType that needs to be De-registered with PE.
matchData: Data pointer which should be matched after frame type is matched.
matchLen: Length of the matchData
RETURN VALUE
The VOS_STATUS code associated with performing the operation
VOS_STATUS_SUCCESS: Success
SIDE EFFECTS
============================================================================*/
VOS_STATUS WLANSAP_DeRegisterMgmtFrame( v_PVOID_t pvosGCtx, tANI_U16 frameType,
tANI_U8* matchData, tANI_U16 matchLen )
{
ptSapContext pSapCtx = NULL;
v_PVOID_t hHal = NULL;
eHalStatus halStatus = eHAL_STATUS_FAILURE;
if( VOS_STA_SAP_MODE == vos_get_conparam ( ) )
{
pSapCtx = VOS_GET_SAP_CB( pvosGCtx );
if (NULL == pSapCtx)
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid SAP pointer from pvosGCtx", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
hHal = VOS_GET_HAL_CB(pSapCtx->pvosGCtx);
if( ( NULL == hHal ) || ( eSAP_TRUE != pSapCtx->isSapSessionOpen ) )
{
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"%s: HAL pointer (%p) NULL OR SME session is not open (%d)",
__FUNCTION__, hHal, pSapCtx->isSapSessionOpen );
return VOS_STATUS_E_FAULT;
}
halStatus = sme_DeregisterMgmtFrame( hHal, pSapCtx->sessionId,
frameType, matchData, matchLen );
if( eHAL_STATUS_SUCCESS == halStatus )
{
return VOS_STATUS_SUCCESS;
}
}
VOS_TRACE( VOS_MODULE_ID_SAP, VOS_TRACE_LEVEL_ERROR,
"Failed to Deregister MGMT frame");
return VOS_STATUS_E_FAULT;
}
#endif // WLAN_FEATURE_P2P
| gpl-2.0 |
keily90/tf101-nv-linux | drivers/net/vxge/vxge-config.c | 387 | 137285 | /******************************************************************************
* This software may be used and distributed according to the terms of
* the GNU General Public License (GPL), incorporated herein by reference.
* Drivers based on or derived from this code fall under the GPL and must
* retain the authorship, copyright and license notice. This file is not
* a complete program and may only be used when the entire operating
* system is licensed under the GPL.
* See the file COPYING in this distribution for more information.
*
* vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
* Virtualized Server Adapter.
* Copyright(c) 2002-2010 Exar Corp.
******************************************************************************/
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"
#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
status = __vxge_hw_vpath_stats_access(vpath, \
VXGE_HW_STATS_OP_READ, \
offset, \
&val64); \
if (status != VXGE_HW_OK) \
return status; \
}
static void
vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
{
u64 val64;
val64 = readq(&vp_reg->rxmac_vcfg0);
val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
writeq(val64, &vp_reg->rxmac_vcfg0);
val64 = readq(&vp_reg->rxmac_vcfg0);
}
/*
* vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
*/
int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
{
struct vxge_hw_vpath_reg __iomem *vp_reg;
struct __vxge_hw_virtualpath *vpath;
u64 val64, rxd_count, rxd_spat;
int count = 0, total_count = 0;
vpath = &hldev->virtual_paths[vp_id];
vp_reg = vpath->vp_reg;
vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
/* Check that the ring controller for this vpath has enough free RxDs
* to send frames to the host. This is done by reading the
* PRC_RXD_DOORBELL_VPn register and comparing the read value to the
* RXD_SPAT value for the vpath.
*/
val64 = readq(&vp_reg->prc_cfg6);
rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
/* Use a factor of 2 when comparing rxd_count against rxd_spat for some
* leg room.
*/
rxd_spat *= 2;
do {
mdelay(1);
rxd_count = readq(&vp_reg->prc_rxd_doorbell);
/* Check that the ring controller for this vpath does
* not have any frame in its pipeline.
*/
val64 = readq(&vp_reg->frm_in_progress_cnt);
if ((rxd_count <= rxd_spat) || (val64 > 0))
count = 0;
else
count++;
total_count++;
} while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
(total_count < VXGE_HW_MAX_POLLING_COUNT));
if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
__func__);
return total_count;
}
/* vxge_hw_device_wait_receive_idle - This function waits until all frames
* stored in the frame buffer for each vpath assigned to the given
* function (hldev) have been sent to the host.
*/
void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
{
int i, total_count = 0;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
continue;
total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
break;
}
}
/*
* __vxge_hw_device_register_poll
* Will poll a certain register for a specified amount of time.
* Will poll until the masked bits are cleared or the poll times out.
*/
static enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
u64 val64;
u32 i = 0;
enum vxge_hw_status ret = VXGE_HW_FAIL;
udelay(10);
do {
val64 = readq(reg);
if (!(val64 & mask))
return VXGE_HW_OK;
udelay(100);
} while (++i <= 9);
i = 0;
do {
val64 = readq(reg);
if (!(val64 & mask))
return VXGE_HW_OK;
mdelay(1);
} while (++i <= max_millis);
return ret;
}
static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
u64 mask, u32 max_millis)
{
__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
wmb();
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
wmb();
return __vxge_hw_device_register_poll(addr, mask, max_millis);
}
static enum vxge_hw_status
vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
u64 *steer_ctrl)
{
struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
enum vxge_hw_status status;
u64 val64;
u32 retry = 0, max_retry = 3;
spin_lock(&vpath->lock);
if (!vpath->vp_open) {
spin_unlock(&vpath->lock);
max_retry = 100;
}
writeq(*data0, &vp_reg->rts_access_steer_data0);
writeq(*data1, &vp_reg->rts_access_steer_data1);
wmb();
val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
*steer_ctrl;
status = __vxge_hw_pio_mem_write64(val64,
&vp_reg->rts_access_steer_ctrl,
VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
VXGE_HW_DEF_DEVICE_POLL_MILLIS);
/* The __vxge_hw_device_register_poll can udelay for a significant
* amount of time, blocking other processes from the CPU. If it delays
* for ~5 secs, an NMI error can occur. A way around this is to give up
* the processor via msleep, but this is not allowed while under lock.
* So, only allow it to sleep for ~4 secs if the vpath is open.
* Otherwise, delay for 1 sec and sleep between polls until the firmware
* operation has completed or timed out.
*/
while ((status != VXGE_HW_OK) && retry++ < max_retry) {
if (!vpath->vp_open)
msleep(20);
status = __vxge_hw_device_register_poll(
&vp_reg->rts_access_steer_ctrl,
VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
VXGE_HW_DEF_DEVICE_POLL_MILLIS);
}
if (status != VXGE_HW_OK)
goto out;
val64 = readq(&vp_reg->rts_access_steer_ctrl);
if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
*data0 = readq(&vp_reg->rts_access_steer_data0);
*data1 = readq(&vp_reg->rts_access_steer_data1);
*steer_ctrl = val64;
} else
status = VXGE_HW_FAIL;
out:
if (vpath->vp_open)
spin_unlock(&vpath->lock);
return status;
}
enum vxge_hw_status
vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
u32 *minor, u32 *build)
{
u64 data0 = 0, data1 = 0, steer_ctrl = 0;
struct __vxge_hw_virtualpath *vpath;
enum vxge_hw_status status;
vpath = &hldev->virtual_paths[hldev->first_vp_id];
status = vxge_hw_vpath_fw_api(vpath,
VXGE_HW_FW_UPGRADE_ACTION,
VXGE_HW_FW_UPGRADE_MEMO,
VXGE_HW_FW_UPGRADE_OFFSET_READ,
&data0, &data1, &steer_ctrl);
if (status != VXGE_HW_OK)
return status;
*major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
*minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
*build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
return status;
}
enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
{
u64 data0 = 0, data1 = 0, steer_ctrl = 0;
struct __vxge_hw_virtualpath *vpath;
enum vxge_hw_status status;
u32 ret;
vpath = &hldev->virtual_paths[hldev->first_vp_id];
status = vxge_hw_vpath_fw_api(vpath,
VXGE_HW_FW_UPGRADE_ACTION,
VXGE_HW_FW_UPGRADE_MEMO,
VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
&data0, &data1, &steer_ctrl);
if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
goto exit;
}
ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
if (ret != 1) {
vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
__func__, ret);
status = VXGE_HW_FAIL;
}
exit:
return status;
}
enum vxge_hw_status
vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
{
u64 data0 = 0, data1 = 0, steer_ctrl = 0;
struct __vxge_hw_virtualpath *vpath;
enum vxge_hw_status status;
int ret_code, sec_code;
vpath = &hldev->virtual_paths[hldev->first_vp_id];
/* send upgrade start command */
status = vxge_hw_vpath_fw_api(vpath,
VXGE_HW_FW_UPGRADE_ACTION,
VXGE_HW_FW_UPGRADE_MEMO,
VXGE_HW_FW_UPGRADE_OFFSET_START,
&data0, &data1, &steer_ctrl);
if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
__func__);
return status;
}
/* Transfer fw image to adapter 16 bytes at a time */
for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
steer_ctrl = 0;
/* The next 128 bits of fwdata to be loaded onto the adapter */
data0 = *((u64 *)fwdata);
data1 = *((u64 *)fwdata + 1);
status = vxge_hw_vpath_fw_api(vpath,
VXGE_HW_FW_UPGRADE_ACTION,
VXGE_HW_FW_UPGRADE_MEMO,
VXGE_HW_FW_UPGRADE_OFFSET_SEND,
&data0, &data1, &steer_ctrl);
if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
__func__);
goto out;
}
ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
switch (ret_code) {
case VXGE_HW_FW_UPGRADE_OK:
/* All OK, send next 16 bytes. */
break;
case VXGE_FW_UPGRADE_BYTES2SKIP:
/* skip bytes in the stream */
fwdata += (data0 >> 8) & 0xFFFFFFFF;
break;
case VXGE_HW_FW_UPGRADE_DONE:
goto out;
case VXGE_HW_FW_UPGRADE_ERR:
sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
switch (sec_code) {
case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
printk(KERN_ERR
"corrupted data from .ncf file\n");
break;
case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
printk(KERN_ERR "invalid .ncf file\n");
break;
case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
printk(KERN_ERR "buffer overflow\n");
break;
case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
printk(KERN_ERR "failed to flash the image\n");
break;
case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
printk(KERN_ERR
"generic error. Unknown error type\n");
break;
default:
printk(KERN_ERR "Unknown error of type %d\n",
sec_code);
break;
}
status = VXGE_HW_FAIL;
goto out;
default:
printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
status = VXGE_HW_FAIL;
goto out;
}
/* point to next 16 bytes */
fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
}
out:
return status;
}
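/* Illustrative sketch (assumption, not original driver code): the order in
 * which the firmware-upgrade helpers above appear intended to be used --
 * transfer the new image with vxge_update_fw_image() and, only on success,
 * commit it with vxge_hw_flash_fw(). fwdata/size are assumed to describe a
 * complete .ncf image already loaded into memory by the caller. */
static enum vxge_hw_status
example_fw_upgrade(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
{
        enum vxge_hw_status status;

        status = vxge_update_fw_image(hldev, fwdata, size);
        if (status != VXGE_HW_OK)
                return status;

        /* Commit only after the whole image has been transferred. */
        return vxge_hw_flash_fw(hldev);
}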
enum vxge_hw_status
vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
struct eprom_image *img)
{
u64 data0 = 0, data1 = 0, steer_ctrl = 0;
struct __vxge_hw_virtualpath *vpath;
enum vxge_hw_status status;
int i;
vpath = &hldev->virtual_paths[hldev->first_vp_id];
for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
data1 = steer_ctrl = 0;
status = vxge_hw_vpath_fw_api(vpath,
VXGE_HW_FW_API_GET_EPROM_REV,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
0, &data0, &data1, &steer_ctrl);
if (status != VXGE_HW_OK)
break;
img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
}
return status;
}
/*
* __vxge_hw_channel_free - Free memory allocated for channel
* This function deallocates memory from the channel and various arrays
* in the channel
*/
static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
kfree(channel->work_arr);
kfree(channel->free_arr);
kfree(channel->reserve_arr);
kfree(channel->orig_arr);
kfree(channel);
}
/*
* __vxge_hw_channel_initialize - Initialize a channel
* This function initializes a channel by properly setting the
* various references
*/
static enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
u32 i;
struct __vxge_hw_virtualpath *vpath;
vpath = channel->vph->vpath;
if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
for (i = 0; i < channel->length; i++)
channel->orig_arr[i] = channel->reserve_arr[i];
}
switch (channel->type) {
case VXGE_HW_CHANNEL_TYPE_FIFO:
vpath->fifoh = (struct __vxge_hw_fifo *)channel;
channel->stats = &((struct __vxge_hw_fifo *)
channel)->stats->common_stats;
break;
case VXGE_HW_CHANNEL_TYPE_RING:
vpath->ringh = (struct __vxge_hw_ring *)channel;
channel->stats = &((struct __vxge_hw_ring *)
channel)->stats->common_stats;
break;
default:
break;
}
return VXGE_HW_OK;
}
/*
* __vxge_hw_channel_reset - Resets a channel
* This function resets a channel by properly setting the various references
*/
static enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
u32 i;
for (i = 0; i < channel->length; i++) {
if (channel->reserve_arr != NULL)
channel->reserve_arr[i] = channel->orig_arr[i];
if (channel->free_arr != NULL)
channel->free_arr[i] = NULL;
if (channel->work_arr != NULL)
channel->work_arr[i] = NULL;
}
channel->free_ptr = channel->length;
channel->reserve_ptr = channel->length;
channel->reserve_top = 0;
channel->post_index = 0;
channel->compl_index = 0;
return VXGE_HW_OK;
}
/*
* __vxge_hw_device_pci_e_init
* Initialize certain PCI/PCI-X configuration registers
* with recommended values. Save config space for future hw resets.
*/
static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
u16 cmd = 0;
/* Set the PErr Response bit and SERR in PCI command register. */
pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
cmd |= 0x140;
pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
pci_save_state(hldev->pdev);
}
/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
* in progress
* This routine checks the vpath reset in progress register is turned zero
*/
static enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
enum vxge_hw_status status;
status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
VXGE_HW_DEF_DEVICE_POLL_MILLIS);
return status;
}
/*
* __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
* Set the swapper bits appropriately for the legacy section.
*/
static enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
u64 val64;
enum vxge_hw_status status = VXGE_HW_OK;
val64 = readq(&legacy_reg->toc_swapper_fb);
wmb();
switch (val64) {
case VXGE_HW_SWAPPER_INITIAL_VALUE:
return status;
case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
&legacy_reg->pifm_rd_swap_en);
writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
&legacy_reg->pifm_rd_flip_en);
writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
&legacy_reg->pifm_wr_swap_en);
writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
&legacy_reg->pifm_wr_flip_en);
break;
case VXGE_HW_SWAPPER_BYTE_SWAPPED:
writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
&legacy_reg->pifm_rd_swap_en);
writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
&legacy_reg->pifm_wr_swap_en);
break;
case VXGE_HW_SWAPPER_BIT_FLIPPED:
writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
&legacy_reg->pifm_rd_flip_en);
writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
&legacy_reg->pifm_wr_flip_en);
break;
}
wmb();
val64 = readq(&legacy_reg->toc_swapper_fb);
if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
status = VXGE_HW_ERR_SWAPPER_CTRL;
return status;
}
/*
* __vxge_hw_device_toc_get
* This routine sets the swapper and reads the toc pointer and returns the
* memory mapped address of the toc
*/
static struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
u64 val64;
struct vxge_hw_toc_reg __iomem *toc = NULL;
enum vxge_hw_status status;
struct vxge_hw_legacy_reg __iomem *legacy_reg =
(struct vxge_hw_legacy_reg __iomem *)bar0;
status = __vxge_hw_legacy_swapper_set(legacy_reg);
if (status != VXGE_HW_OK)
goto exit;
val64 = readq(&legacy_reg->toc_first_pointer);
toc = bar0 + val64;
exit:
return toc;
}
/*
* __vxge_hw_device_reg_addr_get
* This routine sets the swapper and reads the toc pointer and initializes the
* register location pointers in the device object. It waits until the ric is
* completed initializing registers.
*/
static enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
u64 val64;
u32 i;
enum vxge_hw_status status = VXGE_HW_OK;
hldev->legacy_reg = hldev->bar0;
hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
if (hldev->toc_reg == NULL) {
status = VXGE_HW_FAIL;
goto exit;
}
val64 = readq(&hldev->toc_reg->toc_common_pointer);
hldev->common_reg = hldev->bar0 + val64;
val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
hldev->mrpcim_reg = hldev->bar0 + val64;
for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
hldev->srpcim_reg[i] = hldev->bar0 + val64;
}
for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
hldev->vpmgmt_reg[i] = hldev->bar0 + val64;
}
for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
hldev->vpath_reg[i] = hldev->bar0 + val64;
}
val64 = readq(&hldev->toc_reg->toc_kdfc);
switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
case 0:
hldev->kdfc = hldev->bar0 + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64);
break;
default:
break;
}
status = __vxge_hw_device_vpath_reset_in_prog_check(
(u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
return status;
}
/*
* __vxge_hw_device_access_rights_get: Get Access Rights of the driver
* This routine returns the Access Rights of the driver
*/
static u32
__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
{
u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
switch (host_type) {
case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
if (func_id == 0) {
access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
}
break;
case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
break;
case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
break;
case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
break;
case VXGE_HW_SR_VH_FUNCTION0:
case VXGE_HW_VH_NORMAL_FUNCTION:
access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
break;
}
return access_rights;
}
/*
* __vxge_hw_device_is_privilaged
* This routine checks if the device function is privileged or not
*/
enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
{
if (__vxge_hw_device_access_rights_get(host_type,
func_id) &
VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
return VXGE_HW_OK;
else
return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
}
/*
* __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
* Returns the function number of the vpath.
*/
static u32
__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
{
u64 val64;
val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
return
(u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
}
/*
* __vxge_hw_device_host_info_get
* This routine returns the host type assignments
*/
static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
u64 val64;
u32 i;
val64 = readq(&hldev->common_reg->host_type_assignments);
hldev->host_type =
(u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (!(hldev->vpath_assignments & vxge_mBIT(i)))
continue;
hldev->func_id =
__vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
hldev->access_rights = __vxge_hw_device_access_rights_get(
hldev->host_type, hldev->func_id);
hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
hldev->first_vp_id = i;
break;
}
}
/*
* __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
* link width and signalling rate.
*/
static enum vxge_hw_status
__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
{
struct pci_dev *dev = hldev->pdev;
u16 lnk;
/* Get the negotiated link width and speed from PCI config space */
pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk);
if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
return VXGE_HW_ERR_INVALID_PCI_INFO;
switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
case PCIE_LNK_WIDTH_RESRV:
case PCIE_LNK_X1:
case PCIE_LNK_X2:
case PCIE_LNK_X4:
case PCIE_LNK_X8:
break;
default:
return VXGE_HW_ERR_INVALID_PCI_INFO;
}
return VXGE_HW_OK;
}
/*
* __vxge_hw_device_initialize
* Initialize Titan-V hardware.
*/
static enum vxge_hw_status
__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
enum vxge_hw_status status = VXGE_HW_OK;
if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
hldev->func_id)) {
/* Validate the pci-e link width and speed */
status = __vxge_hw_verify_pci_e_info(hldev);
if (status != VXGE_HW_OK)
goto exit;
}
exit:
return status;
}
/*
* __vxge_hw_vpath_fw_ver_get - Get the fw version
* Returns FW Version
*/
static enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_device_hw_info *hw_info)
{
struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
u64 data0, data1 = 0, steer_ctrl = 0;
enum vxge_hw_status status;
status = vxge_hw_vpath_fw_api(vpath,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
0, &data0, &data1, &steer_ctrl);
if (status != VXGE_HW_OK)
goto exit;
fw_date->day =
(u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
fw_date->month =
(u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
fw_date->year =
(u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
fw_date->month, fw_date->day, fw_date->year);
fw_version->major =
(u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
fw_version->minor =
(u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
fw_version->build =
(u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
fw_version->major, fw_version->minor, fw_version->build);
flash_date->day =
(u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
flash_date->month =
(u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
flash_date->year =
(u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
flash_date->month, flash_date->day, flash_date->year);
flash_version->major =
(u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
flash_version->minor =
(u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
flash_version->build =
(u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
flash_version->major, flash_version->minor,
flash_version->build);
exit:
return status;
}
/*
* __vxge_hw_vpath_card_info_get - Get the serial numbers,
* part number and product description.
*/
static enum vxge_hw_status
__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_device_hw_info *hw_info)
{
enum vxge_hw_status status;
u64 data0, data1 = 0, steer_ctrl = 0;
u8 *serial_number = hw_info->serial_number;
u8 *part_number = hw_info->part_number;
u8 *product_desc = hw_info->product_desc;
u32 i, j = 0;
data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
status = vxge_hw_vpath_fw_api(vpath,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
0, &data0, &data1, &steer_ctrl);
if (status != VXGE_HW_OK)
return status;
((u64 *)serial_number)[0] = be64_to_cpu(data0);
((u64 *)serial_number)[1] = be64_to_cpu(data1);
data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
data1 = steer_ctrl = 0;
status = vxge_hw_vpath_fw_api(vpath,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
0, &data0, &data1, &steer_ctrl);
if (status != VXGE_HW_OK)
return status;
((u64 *)part_number)[0] = be64_to_cpu(data0);
((u64 *)part_number)[1] = be64_to_cpu(data1);
for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
data0 = i;
data1 = steer_ctrl = 0;
status = vxge_hw_vpath_fw_api(vpath,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
0, &data0, &data1, &steer_ctrl);
if (status != VXGE_HW_OK)
return status;
((u64 *)product_desc)[j++] = be64_to_cpu(data0);
((u64 *)product_desc)[j++] = be64_to_cpu(data1);
}
return status;
}
/*
* __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
* Returns pci function mode
*/
static enum vxge_hw_status
__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_device_hw_info *hw_info)
{
u64 data0, data1 = 0, steer_ctrl = 0;
enum vxge_hw_status status;
data0 = 0;
status = vxge_hw_vpath_fw_api(vpath,
VXGE_HW_FW_API_GET_FUNC_MODE,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
0, &data0, &data1, &steer_ctrl);
if (status != VXGE_HW_OK)
return status;
hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
return status;
}
/*
* __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
* from MAC address table.
*/
static enum vxge_hw_status
__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
u8 *macaddr, u8 *macaddr_mask)
{
u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
data0 = 0, data1 = 0, steer_ctrl = 0;
enum vxge_hw_status status;
int i;
do {
status = vxge_hw_vpath_fw_api(vpath, action,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
0, &data0, &data1, &steer_ctrl);
if (status != VXGE_HW_OK)
goto exit;
data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
data1);
for (i = ETH_ALEN; i > 0; i--) {
macaddr[i - 1] = (u8) (data0 & 0xFF);
data0 >>= 8;
macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
data1 >>= 8;
}
action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
data0 = 0, data1 = 0, steer_ctrl = 0;
} while (!is_valid_ether_addr(macaddr));
exit:
return status;
}
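/*
* Worked example (illustrative): if the firmware returned
* data0 == 0x0000AABBCCDDEEFF, the byte-unpacking loop in
* __vxge_hw_vpath_addr_get() above stores the address as
* AA:BB:CC:DD:EE:FF -- the lowest byte of data0 becomes the last octet
* of the MAC address.
*/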
/**
* vxge_hw_device_hw_info_get - Get the hw information
* Returns the vpath mask that has the bits set for each vpath allocated
* for the driver, FW version information, and the first mac address for
* each vpath
*/
enum vxge_hw_status __devinit
vxge_hw_device_hw_info_get(void __iomem *bar0,
struct vxge_hw_device_hw_info *hw_info)
{
u32 i;
u64 val64;
struct vxge_hw_toc_reg __iomem *toc;
struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
struct vxge_hw_common_reg __iomem *common_reg;
struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
enum vxge_hw_status status;
struct __vxge_hw_virtualpath vpath;
memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
toc = __vxge_hw_device_toc_get(bar0);
if (toc == NULL) {
status = VXGE_HW_ERR_CRITICAL;
goto exit;
}
val64 = readq(&toc->toc_common_pointer);
common_reg = bar0 + val64;
status = __vxge_hw_device_vpath_reset_in_prog_check(
(u64 __iomem *)&common_reg->vpath_rst_in_prog);
if (status != VXGE_HW_OK)
goto exit;
hw_info->vpath_mask = readq(&common_reg->vpath_assignments);
val64 = readq(&common_reg->host_type_assignments);
hw_info->host_type =
(u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
continue;
val64 = readq(&toc->toc_vpmgmt_pointer[i]);
vpmgmt_reg = bar0 + val64;
hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
if (__vxge_hw_device_access_rights_get(hw_info->host_type,
hw_info->func_id) &
VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
val64 = readq(&toc->toc_mrpcim_pointer);
mrpcim_reg = bar0 + val64;
writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
wmb();
}
val64 = readq(&toc->toc_vpath_pointer[i]);
spin_lock_init(&vpath.lock);
vpath.vp_reg = bar0 + val64;
vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
if (status != VXGE_HW_OK)
goto exit;
status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
if (status != VXGE_HW_OK)
goto exit;
status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
if (status != VXGE_HW_OK)
goto exit;
break;
}
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
continue;
val64 = readq(&toc->toc_vpath_pointer[i]);
vpath.vp_reg = bar0 + val64;
vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
status = __vxge_hw_vpath_addr_get(&vpath,
hw_info->mac_addrs[i],
hw_info->mac_addr_masks[i]);
if (status != VXGE_HW_OK)
goto exit;
}
exit:
return status;
}
/*
* __vxge_hw_blockpool_destroy - Deallocates the block pool
*/
static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
{
struct __vxge_hw_device *hldev;
struct list_head *p, *n;
u16 ret;
if (blockpool == NULL) {
ret = 1;
goto exit;
}
hldev = blockpool->hldev;
list_for_each_safe(p, n, &blockpool->free_block_list) {
pci_unmap_single(hldev->pdev,
((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
((struct __vxge_hw_blockpool_entry *)p)->length,
PCI_DMA_BIDIRECTIONAL);
vxge_os_dma_free(hldev->pdev,
((struct __vxge_hw_blockpool_entry *)p)->memblock,
&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
kfree(p);
blockpool->pool_size--;
}
list_for_each_safe(p, n, &blockpool->free_entry_list) {
list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
kfree((void *)p);
}
ret = 0;
exit:
return;
}
/*
* __vxge_hw_blockpool_create - Create block pool
*/
static enum vxge_hw_status
__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
struct __vxge_hw_blockpool *blockpool,
u32 pool_size,
u32 pool_max)
{
u32 i;
struct __vxge_hw_blockpool_entry *entry = NULL;
void *memblock;
dma_addr_t dma_addr;
struct pci_dev *dma_handle;
struct pci_dev *acc_handle;
enum vxge_hw_status status = VXGE_HW_OK;
if (blockpool == NULL) {
status = VXGE_HW_FAIL;
goto blockpool_create_exit;
}
blockpool->hldev = hldev;
blockpool->block_size = VXGE_HW_BLOCK_SIZE;
blockpool->pool_size = 0;
blockpool->pool_max = pool_max;
blockpool->req_out = 0;
INIT_LIST_HEAD(&blockpool->free_block_list);
INIT_LIST_HEAD(&blockpool->free_entry_list);
for (i = 0; i < pool_size + pool_max; i++) {
entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
GFP_KERNEL);
if (entry == NULL) {
__vxge_hw_blockpool_destroy(blockpool);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto blockpool_create_exit;
}
list_add(&entry->item, &blockpool->free_entry_list);
}
for (i = 0; i < pool_size; i++) {
memblock = vxge_os_dma_malloc(
hldev->pdev,
VXGE_HW_BLOCK_SIZE,
&dma_handle,
&acc_handle);
if (memblock == NULL) {
__vxge_hw_blockpool_destroy(blockpool);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto blockpool_create_exit;
}
dma_addr = pci_map_single(hldev->pdev, memblock,
VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
if (unlikely(pci_dma_mapping_error(hldev->pdev,
dma_addr))) {
vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
__vxge_hw_blockpool_destroy(blockpool);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto blockpool_create_exit;
}
if (!list_empty(&blockpool->free_entry_list))
entry = (struct __vxge_hw_blockpool_entry *)
list_first_entry(&blockpool->free_entry_list,
struct __vxge_hw_blockpool_entry,
item);
if (entry == NULL)
entry =
kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
GFP_KERNEL);
if (entry != NULL) {
list_del(&entry->item);
entry->length = VXGE_HW_BLOCK_SIZE;
entry->memblock = memblock;
entry->dma_addr = dma_addr;
entry->acc_handle = acc_handle;
entry->dma_handle = dma_handle;
list_add(&entry->item,
&blockpool->free_block_list);
blockpool->pool_size++;
} else {
__vxge_hw_blockpool_destroy(blockpool);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto blockpool_create_exit;
}
}
blockpool_create_exit:
return status;
}
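/*
* Worked example (illustrative): vxge_hw_device_initialize() below sizes
* this pool as dma_blockpool_initial + nblocks, where nblocks counts one
* stats block plus one block per assigned vpath and the configured ring
* and fifo blocks of each, so the pool is pre-populated before any
* channel is created.
*/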
/*
* __vxge_hw_device_fifo_config_check - Check fifo configuration.
* Check the fifo configuration
*/
static enum vxge_hw_status
__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
{
if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
(fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
return VXGE_HW_BADCFG_FIFO_BLOCKS;
return VXGE_HW_OK;
}
/*
* __vxge_hw_device_vpath_config_check - Check vpath configuration.
* Check the vpath configuration
*/
static enum vxge_hw_status
__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
{
enum vxge_hw_status status;
if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
(vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
if (status != VXGE_HW_OK)
return status;
if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
(vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
return VXGE_HW_BADCFG_VPATH_MTU;
if ((vp_config->rpa_strip_vlan_tag !=
VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
(vp_config->rpa_strip_vlan_tag !=
VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
(vp_config->rpa_strip_vlan_tag !=
VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
return VXGE_HW_OK;
}
/*
* __vxge_hw_device_config_check - Check device configuration.
* Check the device configuration
*/
static enum vxge_hw_status
__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
{
u32 i;
enum vxge_hw_status status;
if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
(new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
(new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
(new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
return VXGE_HW_BADCFG_INTR_MODE;
if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
(new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
return VXGE_HW_BADCFG_RTS_MAC_EN;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
status = __vxge_hw_device_vpath_config_check(
&new_config->vp_config[i]);
if (status != VXGE_HW_OK)
return status;
}
return VXGE_HW_OK;
}
/*
* vxge_hw_device_initialize - Initialize Titan device.
* Initialize Titan device. All the arguments of this public API are 'IN'
* except @devh, which receives the newly allocated device handle on
* success. The driver cooperates with the OS to find a new Titan device
* and locate its PCI and memory spaces.
*
* This routine allocates sizeof(struct __vxge_hw_device) bytes for the HW
* object and then performs the Titan hardware initialization on it.
*/
enum vxge_hw_status __devinit
vxge_hw_device_initialize(
struct __vxge_hw_device **devh,
struct vxge_hw_device_attr *attr,
struct vxge_hw_device_config *device_config)
{
u32 i;
u32 nblocks = 0;
struct __vxge_hw_device *hldev = NULL;
enum vxge_hw_status status = VXGE_HW_OK;
status = __vxge_hw_device_config_check(device_config);
if (status != VXGE_HW_OK)
goto exit;
hldev = vzalloc(sizeof(struct __vxge_hw_device));
if (hldev == NULL) {
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
hldev->magic = VXGE_HW_DEVICE_MAGIC;
vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
/* apply config */
memcpy(&hldev->config, device_config,
sizeof(struct vxge_hw_device_config));
hldev->bar0 = attr->bar0;
hldev->pdev = attr->pdev;
hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;
__vxge_hw_device_pci_e_init(hldev);
status = __vxge_hw_device_reg_addr_get(hldev);
if (status != VXGE_HW_OK) {
vfree(hldev);
goto exit;
}
__vxge_hw_device_host_info_get(hldev);
/* Incrementing for stats blocks */
nblocks++;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (!(hldev->vpath_assignments & vxge_mBIT(i)))
continue;
if (device_config->vp_config[i].ring.enable ==
VXGE_HW_RING_ENABLE)
nblocks += device_config->vp_config[i].ring.ring_blocks;
if (device_config->vp_config[i].fifo.enable ==
VXGE_HW_FIFO_ENABLE)
nblocks += device_config->vp_config[i].fifo.fifo_blocks;
nblocks++;
}
if (__vxge_hw_blockpool_create(hldev,
&hldev->block_pool,
device_config->dma_blockpool_initial + nblocks,
device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {
vxge_hw_device_terminate(hldev);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
status = __vxge_hw_device_initialize(hldev);
if (status != VXGE_HW_OK) {
vxge_hw_device_terminate(hldev);
goto exit;
}
*devh = hldev;
exit:
return status;
}
/*
* vxge_hw_device_terminate - Terminate Titan device.
* Terminate HW device.
*/
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
hldev->magic = VXGE_HW_DEVICE_DEAD;
__vxge_hw_blockpool_destroy(&hldev->block_pool);
vfree(hldev);
}
/*
* __vxge_hw_vpath_stats_access - Get the statistics from the given location
* and offset and perform an operation
*/
static enum vxge_hw_status
__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
u32 operation, u32 offset, u64 *stat)
{
u64 val64;
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_hw_vpath_reg __iomem *vp_reg;
if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
status = VXGE_HW_ERR_VPATH_NOT_OPEN;
goto vpath_stats_access_exit;
}
vp_reg = vpath->vp_reg;
val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
status = __vxge_hw_pio_mem_write64(val64,
&vp_reg->xmac_stats_access_cmd,
VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
vpath->hldev->config.device_poll_millis);
if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
*stat = readq(&vp_reg->xmac_stats_access_data);
else
*stat = 0;
vpath_stats_access_exit:
return status;
}
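/*
* Note (illustrative): the pattern above -- write the command word with
* the STROBE bit set, then let __vxge_hw_pio_mem_write64() poll for the
* strobe to clear within device_poll_millis -- is the same handshake the
* mrpcim variant, vxge_hw_mrpcim_stats_access() below, relies on.
*/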
/*
* __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
*/
static enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
{
u64 *val64;
int i;
u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
enum vxge_hw_status status = VXGE_HW_OK;
val64 = (u64 *)vpath_tx_stats;
if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
status = VXGE_HW_ERR_VPATH_NOT_OPEN;
goto exit;
}
for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
status = __vxge_hw_vpath_stats_access(vpath,
VXGE_HW_STATS_OP_READ,
offset, val64);
if (status != VXGE_HW_OK)
goto exit;
offset++;
val64++;
}
exit:
return status;
}
/*
* __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
*/
static enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
{
u64 *val64;
enum vxge_hw_status status = VXGE_HW_OK;
int i;
u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
val64 = (u64 *) vpath_rx_stats;
if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
status = VXGE_HW_ERR_VPATH_NOT_OPEN;
goto exit;
}
for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
status = __vxge_hw_vpath_stats_access(vpath,
VXGE_HW_STATS_OP_READ,
offset >> 3, val64);
if (status != VXGE_HW_OK)
goto exit;
offset += 8;
val64++;
}
exit:
return status;
}
/*
* __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
*/
static enum vxge_hw_status
__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_vpath_stats_hw_info *hw_stats)
{
u64 val64;
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_hw_vpath_reg __iomem *vp_reg;
if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
status = VXGE_HW_ERR_VPATH_NOT_OPEN;
goto exit;
}
vp_reg = vpath->vp_reg;
val64 = readq(&vp_reg->vpath_debug_stats0);
hw_stats->ini_num_mwr_sent =
(u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
val64 = readq(&vp_reg->vpath_debug_stats1);
hw_stats->ini_num_mrd_sent =
(u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
val64 = readq(&vp_reg->vpath_debug_stats2);
hw_stats->ini_num_cpl_rcvd =
(u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
val64 = readq(&vp_reg->vpath_debug_stats3);
hw_stats->ini_num_mwr_byte_sent =
VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
val64 = readq(&vp_reg->vpath_debug_stats4);
hw_stats->ini_num_cpl_byte_rcvd =
VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
val64 = readq(&vp_reg->vpath_debug_stats5);
hw_stats->wrcrdtarb_xoff =
(u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
val64 = readq(&vp_reg->vpath_debug_stats6);
hw_stats->rdcrdtarb_xoff =
(u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
val64 = readq(&vp_reg->vpath_genstats_count01);
hw_stats->vpath_genstats_count0 =
(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
val64);
val64 = readq(&vp_reg->vpath_genstats_count01);
hw_stats->vpath_genstats_count1 =
(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
val64);
val64 = readq(&vp_reg->vpath_genstats_count23);
hw_stats->vpath_genstats_count2 =
(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
val64);
/* genstats count3 lives in the count23 register, not count01 */
val64 = readq(&vp_reg->vpath_genstats_count23);
hw_stats->vpath_genstats_count3 =
(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
val64);
val64 = readq(&vp_reg->vpath_genstats_count4);
hw_stats->vpath_genstats_count4 =
(u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
val64);
val64 = readq(&vp_reg->vpath_genstats_count5);
hw_stats->vpath_genstats_count5 =
(u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
val64);
status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
if (status != VXGE_HW_OK)
goto exit;
status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
if (status != VXGE_HW_OK)
goto exit;
VXGE_HW_VPATH_STATS_PIO_READ(
VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
hw_stats->prog_event_vnum0 =
(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
hw_stats->prog_event_vnum1 =
(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
VXGE_HW_VPATH_STATS_PIO_READ(
VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
hw_stats->prog_event_vnum2 =
(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
hw_stats->prog_event_vnum3 =
(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
val64 = readq(&vp_reg->rx_multi_cast_stats);
hw_stats->rx_multi_cast_frame_discard =
(u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
val64 = readq(&vp_reg->rx_frm_transferred);
hw_stats->rx_frm_transferred =
(u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
val64 = readq(&vp_reg->rxd_returned);
hw_stats->rxd_returned =
(u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
val64 = readq(&vp_reg->dbg_stats_rx_mpa);
hw_stats->rx_mpa_len_fail_frms =
(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
hw_stats->rx_mpa_mrk_fail_frms =
(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
hw_stats->rx_mpa_crc_fail_frms =
(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
val64 = readq(&vp_reg->dbg_stats_rx_fau);
hw_stats->rx_permitted_frms =
(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
hw_stats->rx_vp_reset_discarded_frms =
(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
hw_stats->rx_wol_frms =
(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
hw_stats->tx_vp_reset_discarded_frms =
(u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
val64);
exit:
return status;
}
/*
* vxge_hw_device_stats_get - Get the device hw statistics.
* Returns the vpath h/w stats for the device.
*/
enum vxge_hw_status
vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
struct vxge_hw_device_stats_hw_info *hw_stats)
{
u32 i;
enum vxge_hw_status status = VXGE_HW_OK;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
(hldev->virtual_paths[i].vp_open ==
VXGE_HW_VP_NOT_OPEN))
continue;
memcpy(hldev->virtual_paths[i].hw_stats_sav,
hldev->virtual_paths[i].hw_stats,
sizeof(struct vxge_hw_vpath_stats_hw_info));
status = __vxge_hw_vpath_stats_get(
&hldev->virtual_paths[i],
hldev->virtual_paths[i].hw_stats);
}
memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
sizeof(struct vxge_hw_device_stats_hw_info));
return status;
}
/*
* vxge_hw_driver_stats_get - Get the device sw statistics.
* Returns the vpath s/w stats for the device.
*/
enum vxge_hw_status vxge_hw_driver_stats_get(
struct __vxge_hw_device *hldev,
struct vxge_hw_device_stats_sw_info *sw_stats)
{
enum vxge_hw_status status = VXGE_HW_OK;
memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
sizeof(struct vxge_hw_device_stats_sw_info));
return status;
}
/*
* vxge_hw_mrpcim_stats_access - Access the statistics from the given
* location and offset and perform an operation
*/
enum vxge_hw_status
vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
u32 operation, u32 location, u32 offset, u64 *stat)
{
u64 val64;
enum vxge_hw_status status = VXGE_HW_OK;
status = __vxge_hw_device_is_privilaged(hldev->host_type,
hldev->func_id);
if (status != VXGE_HW_OK)
goto exit;
val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);
status = __vxge_hw_pio_mem_write64(val64,
&hldev->mrpcim_reg->xmac_stats_sys_cmd,
VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
hldev->config.device_poll_millis);
if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
*stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
else
*stat = 0;
exit:
return status;
}
/*
* vxge_hw_device_xmac_aggr_stats_get - Get the statistics on an
* aggregate port
*/
static enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
u64 *val64;
int i;
u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
enum vxge_hw_status status = VXGE_HW_OK;
val64 = (u64 *)aggr_stats;
status = __vxge_hw_device_is_privilaged(hldev->host_type,
hldev->func_id);
if (status != VXGE_HW_OK)
goto exit;
for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
status = vxge_hw_mrpcim_stats_access(hldev,
VXGE_HW_STATS_OP_READ,
VXGE_HW_STATS_LOC_AGGR,
((offset + (104 * port)) >> 3), val64);
if (status != VXGE_HW_OK)
goto exit;
offset += 8;
val64++;
}
exit:
return status;
}
/*
* vxge_hw_device_xmac_port_stats_get - Get the statistics on a port
*/
static enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
struct vxge_hw_xmac_port_stats *port_stats)
{
u64 *val64;
enum vxge_hw_status status = VXGE_HW_OK;
int i;
u32 offset = 0x0;
val64 = (u64 *) port_stats;
status = __vxge_hw_device_is_privilaged(hldev->host_type,
hldev->func_id);
if (status != VXGE_HW_OK)
goto exit;
for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
status = vxge_hw_mrpcim_stats_access(hldev,
VXGE_HW_STATS_OP_READ,
VXGE_HW_STATS_LOC_AGGR,
((offset + (608 * port)) >> 3), val64);
if (status != VXGE_HW_OK)
goto exit;
offset += 8;
val64++;
}
exit:
return status;
}
/*
* vxge_hw_device_xmac_stats_get - Get the XMAC statistics
*/
enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
struct vxge_hw_xmac_stats *xmac_stats)
{
enum vxge_hw_status status = VXGE_HW_OK;
u32 i;
status = vxge_hw_device_xmac_aggr_stats_get(hldev,
0, &xmac_stats->aggr_stats[0]);
if (status != VXGE_HW_OK)
goto exit;
status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1, &xmac_stats->aggr_stats[1]);
if (status != VXGE_HW_OK)
goto exit;
for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
status = vxge_hw_device_xmac_port_stats_get(hldev,
i, &xmac_stats->port_stats[i]);
if (status != VXGE_HW_OK)
goto exit;
}
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
continue;
status = __vxge_hw_vpath_xmac_tx_stats_get(
&hldev->virtual_paths[i],
&xmac_stats->vpath_tx_stats[i]);
if (status != VXGE_HW_OK)
goto exit;
status = __vxge_hw_vpath_xmac_rx_stats_get(
&hldev->virtual_paths[i],
&xmac_stats->vpath_rx_stats[i]);
if (status != VXGE_HW_OK)
goto exit;
}
exit:
return status;
}
/*
* vxge_hw_device_debug_set - Set the debug module, level and timestamp
* This routine is used to dynamically change the debug output
*/
void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
enum vxge_debug_level level, u32 mask)
{
if (hldev == NULL)
return;
#if defined(VXGE_DEBUG_TRACE_MASK) || \
defined(VXGE_DEBUG_ERR_MASK)
hldev->debug_module_mask = mask;
hldev->debug_level = level;
#endif
#if defined(VXGE_DEBUG_ERR_MASK)
hldev->level_err = level & VXGE_ERR;
#endif
#if defined(VXGE_DEBUG_TRACE_MASK)
hldev->level_trace = level & VXGE_TRACE;
#endif
}
/*
* vxge_hw_device_error_level_get - Get the error level
* This routine returns the current error level set
*/
u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_ERR_MASK)
if (hldev == NULL)
return VXGE_ERR;
else
return hldev->level_err;
#else
return 0;
#endif
}
/*
* vxge_hw_device_trace_level_get - Get the trace level
* This routine returns the current trace level set
*/
u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK)
if (hldev == NULL)
return VXGE_TRACE;
else
return hldev->level_trace;
#else
return 0;
#endif
}
/*
* vxge_hw_device_getpause_data - Pause frame generation and reception.
* Returns the Pause frame generation and reception capability of the NIC.
*/
enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
u32 port, u32 *tx, u32 *rx)
{
u64 val64;
enum vxge_hw_status status = VXGE_HW_OK;
if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
status = VXGE_HW_ERR_INVALID_DEVICE;
goto exit;
}
if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
status = VXGE_HW_ERR_INVALID_PORT;
goto exit;
}
if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
goto exit;
}
val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
*tx = 1;
if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
*rx = 1;
exit:
return status;
}
/*
* vxge_hw_device_setpause_data - set/reset pause frame generation.
* It can be used to set or reset Pause frame generation or reception
* support of the NIC.
*/
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
u32 port, u32 tx, u32 rx)
{
u64 val64;
enum vxge_hw_status status = VXGE_HW_OK;
if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
status = VXGE_HW_ERR_INVALID_DEVICE;
goto exit;
}
if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
status = VXGE_HW_ERR_INVALID_PORT;
goto exit;
}
status = __vxge_hw_device_is_privilaged(hldev->host_type,
hldev->func_id);
if (status != VXGE_HW_OK)
goto exit;
val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
if (tx)
val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
else
val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
if (rx)
val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
else
val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
exit:
return status;
}
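/*
* Usage sketch (illustrative, error handling elided): enabling both
* generation and reception of pause frames on port 0 is
* vxge_hw_device_setpause_data(hldev, 0, 1, 1); the current setting can
* be read back with vxge_hw_device_getpause_data(hldev, 0, &tx, &rx).
*/
/*
* vxge_hw_device_link_width_get - Get the negotiated PCIe link width
*/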
u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
{
struct pci_dev *dev = hldev->pdev;
u16 lnk;
pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk);
return (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
}
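/*
* Example (illustrative sketch, not part of the original driver): the
* negotiated link speed sits in the same LNKSTA word; a value of 1
* (2.5 GT/s) is what __vxge_hw_verify_pci_e_info() above requires.
*/
static inline u16
vxge_example_link_speed_get(struct __vxge_hw_device *hldev)
{
struct pci_dev *dev = hldev->pdev;
u16 lnk;
pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk);
return lnk & PCI_EXP_LNKSTA_CLS; /* current link speed field */
}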
/*
* __vxge_hw_ring_block_memblock_idx - Return the memblock index
* This function returns the memblock index stored in the RxD block
*/
static inline u32
__vxge_hw_ring_block_memblock_idx(u8 *block)
{
return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
}
/*
* __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
* This function stores the memblock index in the RxD block
*/
static inline void
__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
{
*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
}
/*
* __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
* in an RxD block
*/
static inline void
__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
{
*((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}
/*
* __vxge_hw_ring_first_block_address_get - Returns the dma address of the
* first block
* Returns the dma address of the first RxD block
*/
static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
struct vxge_hw_mempool_dma *dma_object;
dma_object = ring->mempool->memblocks_dma_arr;
vxge_assert(dma_object != NULL);
return dma_object->addr;
}
/*
* __vxge_hw_ring_item_dma_addr - Return the dma address of an item
* This function returns the dma address of a given item
*/
static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
void *item)
{
u32 memblock_idx;
void *memblock;
struct vxge_hw_mempool_dma *memblock_dma_object;
ptrdiff_t dma_item_offset;
/* get owner memblock index */
memblock_idx = __vxge_hw_ring_block_memblock_idx(item);
/* get owner memblock by memblock index */
memblock = mempoolh->memblocks_arr[memblock_idx];
/* get memblock DMA object by memblock index */
memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;
/* calculate offset in the memblock of this item */
dma_item_offset = (u8 *)item - (u8 *)memblock;
return memblock_dma_object->addr + dma_item_offset;
}
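/*
* Worked example (illustrative): with a 4 KB memblock holding two 2 KB
* RxD blocks, an item at memblock + 0x800 resolves to
* memblock_dma_object->addr + 0x800 -- the item's offset within the
* virtual memblock equals its offset within the DMA mapping.
*/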
/*
* __vxge_hw_ring_rxdblock_link - Link the RxD blocks
* This function links the "from" RxD block to the "to" RxD block by
* writing the DMA address of "to" into the next-block pointer of "from"
*/
static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
struct __vxge_hw_ring *ring, u32 from,
u32 to)
{
u8 *to_item, *from_item;
dma_addr_t to_dma;
/* get "from" RxD block */
from_item = mempoolh->items_arr[from];
vxge_assert(from_item);
/* get "to" RxD block */
to_item = mempoolh->items_arr[to];
vxge_assert(to_item);
/* get the DMA address of the beginning of the "to" RxD block */
to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);
/* point the next-block pointer of the "from" RxD block at the
* "to" block's DMA start address */
__vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
}
/*
* __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
* block callback
* This function is the callback passed to __vxge_hw_mempool_create to
* create the memory pool for RxD blocks
*/
static void
__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
u32 memblock_index,
struct vxge_hw_mempool_dma *dma_object,
u32 index, u32 is_last)
{
u32 i;
void *item = mempoolh->items_arr[index];
struct __vxge_hw_ring *ring =
(struct __vxge_hw_ring *)mempoolh->userdata;
/* format rxds array */
for (i = 0; i < ring->rxds_per_block; i++) {
void *rxdblock_priv;
void *uld_priv;
struct vxge_hw_ring_rxd_1 *rxdp;
u32 reserve_index = ring->channel.reserve_ptr -
(index * ring->rxds_per_block + i + 1);
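/* Worked example (illustrative): with reserve_ptr == 512 and
* rxds_per_block == 128, block 0 / rxd 0 lands at
* reserve_arr[511]; the reserve array is filled top-down as the
* blocks are formatted. */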
u32 memblock_item_idx;
ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
i * ring->rxd_size;
/* Note: memblock_item_idx is index of the item within
* the memblock. For instance, in case of three RxD-blocks
* per memblock this value can be 0, 1 or 2. */
rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
memblock_index, item,
&memblock_item_idx);
rxdp = ring->channel.reserve_arr[reserve_index];
uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
/* pre-format Host_Control */
rxdp->host_control = (u64)(size_t)uld_priv;
}
__vxge_hw_ring_block_memblock_idx_set(item, memblock_index);
if (is_last) {
/* link last one with first one */
__vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
}
if (index > 0) {
/* link this RxD block with previous one */
__vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
}
}
/*
* vxge_hw_ring_replenish - Initial replenish of RxDs
* This function replenishes the RxDs from the reserve array to the work array
*/
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
void *rxd;
struct __vxge_hw_channel *channel;
enum vxge_hw_status status = VXGE_HW_OK;
channel = &ring->channel;
while (vxge_hw_channel_dtr_count(channel) > 0) {
status = vxge_hw_ring_rxd_reserve(ring, &rxd);
vxge_assert(status == VXGE_HW_OK);
if (ring->rxd_init) {
status = ring->rxd_init(rxd, channel->userdata);
if (status != VXGE_HW_OK) {
vxge_hw_ring_rxd_free(ring, rxd);
goto exit;
}
}
vxge_hw_ring_rxd_post(ring, rxd);
}
status = VXGE_HW_OK;
exit:
return status;
}
/*
* __vxge_hw_channel_allocate - Allocate memory for channel
* This function allocates required memory for the channel and various arrays
* in the channel
*/
static struct __vxge_hw_channel *
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
enum __vxge_hw_channel_type type,
u32 length, u32 per_dtr_space,
void *userdata)
{
struct __vxge_hw_channel *channel;
struct __vxge_hw_device *hldev;
int size = 0;
u32 vp_id;
hldev = vph->vpath->hldev;
vp_id = vph->vpath->vp_id;
switch (type) {
case VXGE_HW_CHANNEL_TYPE_FIFO:
size = sizeof(struct __vxge_hw_fifo);
break;
case VXGE_HW_CHANNEL_TYPE_RING:
size = sizeof(struct __vxge_hw_ring);
break;
default:
break;
}
channel = kzalloc(size, GFP_KERNEL);
if (channel == NULL)
goto exit0;
INIT_LIST_HEAD(&channel->item);
channel->common_reg = hldev->common_reg;
channel->first_vp_id = hldev->first_vp_id;
channel->type = type;
channel->devh = hldev;
channel->vph = vph;
channel->userdata = userdata;
channel->per_dtr_space = per_dtr_space;
channel->length = length;
channel->vp_id = vp_id;
channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
if (channel->work_arr == NULL)
goto exit1;
channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
if (channel->free_arr == NULL)
goto exit1;
channel->free_ptr = length;
channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
if (channel->reserve_arr == NULL)
goto exit1;
channel->reserve_ptr = length;
channel->reserve_top = 0;
channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
if (channel->orig_arr == NULL)
goto exit1;
return channel;
exit1:
__vxge_hw_channel_free(channel);
exit0:
return NULL;
}
/*
* vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
* Adds a block to block pool
*/
static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
void *block_addr,
u32 length,
struct pci_dev *dma_h,
struct pci_dev *acc_handle)
{
struct __vxge_hw_blockpool *blockpool;
struct __vxge_hw_blockpool_entry *entry = NULL;
dma_addr_t dma_addr;
enum vxge_hw_status status = VXGE_HW_OK;
u32 req_out;
blockpool = &devh->block_pool;
if (block_addr == NULL) {
blockpool->req_out--;
status = VXGE_HW_FAIL;
goto exit;
}
dma_addr = pci_map_single(devh->pdev, block_addr, length,
PCI_DMA_BIDIRECTIONAL);
if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
blockpool->req_out--;
status = VXGE_HW_FAIL;
goto exit;
}
if (!list_empty(&blockpool->free_entry_list))
entry = (struct __vxge_hw_blockpool_entry *)
list_first_entry(&blockpool->free_entry_list,
struct __vxge_hw_blockpool_entry,
item);
if (entry == NULL)
entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
else
list_del(&entry->item);
if (entry != NULL) {
entry->length = length;
entry->memblock = block_addr;
entry->dma_addr = dma_addr;
entry->acc_handle = acc_handle;
entry->dma_handle = dma_h;
list_add(&entry->item, &blockpool->free_block_list);
blockpool->pool_size++;
status = VXGE_HW_OK;
} else
status = VXGE_HW_ERR_OUT_OF_MEMORY;
blockpool->req_out--;
req_out = blockpool->req_out;
exit:
return;
}
static inline void
vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
{
gfp_t flags;
void *vaddr;
if (in_interrupt())
flags = GFP_ATOMIC | GFP_DMA;
else
flags = GFP_KERNEL | GFP_DMA;
vaddr = kmalloc(size, flags);
vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
}
/*
* __vxge_hw_blockpool_blocks_add - Request additional blocks
*/
static
void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
{
u32 nreq = 0, i;
if ((blockpool->pool_size + blockpool->req_out) <
VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
blockpool->req_out += nreq;
}
for (i = 0; i < nreq; i++)
vxge_os_dma_malloc_async(
((struct __vxge_hw_device *)blockpool->hldev)->pdev,
blockpool->hldev, VXGE_HW_BLOCK_SIZE);
}
/*
* __vxge_hw_blockpool_malloc - Allocate a memory block from pool
* Allocates a block of memory of given size, either from block pool
* or by calling vxge_os_dma_malloc()
*/
static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
struct vxge_hw_mempool_dma *dma_object)
{
struct __vxge_hw_blockpool_entry *entry = NULL;
struct __vxge_hw_blockpool *blockpool;
void *memblock = NULL;
enum vxge_hw_status status = VXGE_HW_OK;
blockpool = &devh->block_pool;
if (size != blockpool->block_size) {
memblock = vxge_os_dma_malloc(devh->pdev, size,
&dma_object->handle,
&dma_object->acc_handle);
if (memblock == NULL) {
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
dma_object->addr = pci_map_single(devh->pdev, memblock, size,
PCI_DMA_BIDIRECTIONAL);
if (unlikely(pci_dma_mapping_error(devh->pdev,
dma_object->addr))) {
vxge_os_dma_free(devh->pdev, memblock,
&dma_object->acc_handle);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
} else {
if (!list_empty(&blockpool->free_block_list))
entry = (struct __vxge_hw_blockpool_entry *)
list_first_entry(&blockpool->free_block_list,
struct __vxge_hw_blockpool_entry,
item);
if (entry != NULL) {
list_del(&entry->item);
dma_object->addr = entry->dma_addr;
dma_object->handle = entry->dma_handle;
dma_object->acc_handle = entry->acc_handle;
memblock = entry->memblock;
list_add(&entry->item,
&blockpool->free_entry_list);
blockpool->pool_size--;
}
if (memblock != NULL)
__vxge_hw_blockpool_blocks_add(blockpool);
}
exit:
return memblock;
}
/*
* __vxge_hw_blockpool_blocks_remove - Free additional blocks
*/
static void
__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
{
struct list_head *p, *n;
list_for_each_safe(p, n, &blockpool->free_block_list) {
if (blockpool->pool_size < blockpool->pool_max)
break;
pci_unmap_single(
((struct __vxge_hw_device *)blockpool->hldev)->pdev,
((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
((struct __vxge_hw_blockpool_entry *)p)->length,
PCI_DMA_BIDIRECTIONAL);
vxge_os_dma_free(
((struct __vxge_hw_device *)blockpool->hldev)->pdev,
((struct __vxge_hw_blockpool_entry *)p)->memblock,
&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
list_add(p, &blockpool->free_entry_list);
blockpool->pool_size--;
}
}
/*
* __vxge_hw_blockpool_free - Frees the memory allocated with
* __vxge_hw_blockpool_malloc
*/
static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
void *memblock, u32 size,
struct vxge_hw_mempool_dma *dma_object)
{
struct __vxge_hw_blockpool_entry *entry = NULL;
struct __vxge_hw_blockpool *blockpool;
enum vxge_hw_status status = VXGE_HW_OK;
blockpool = &devh->block_pool;
if (size != blockpool->block_size) {
pci_unmap_single(devh->pdev, dma_object->addr, size,
PCI_DMA_BIDIRECTIONAL);
vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
} else {
if (!list_empty(&blockpool->free_entry_list))
entry = (struct __vxge_hw_blockpool_entry *)
list_first_entry(&blockpool->free_entry_list,
struct __vxge_hw_blockpool_entry,
item);
if (entry == NULL)
entry = vmalloc(sizeof(
struct __vxge_hw_blockpool_entry));
else
list_del(&entry->item);
if (entry != NULL) {
entry->length = size;
entry->memblock = memblock;
entry->dma_addr = dma_object->addr;
entry->acc_handle = dma_object->acc_handle;
entry->dma_handle = dma_object->handle;
list_add(&entry->item,
&blockpool->free_block_list);
blockpool->pool_size++;
status = VXGE_HW_OK;
} else
status = VXGE_HW_ERR_OUT_OF_MEMORY;
if (status == VXGE_HW_OK)
__vxge_hw_blockpool_blocks_remove(blockpool);
}
}
/*
* __vxge_hw_mempool_destroy - Deallocates the memory pool
*/
static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
{
u32 i, j;
struct __vxge_hw_device *devh = mempool->devh;
for (i = 0; i < mempool->memblocks_allocated; i++) {
struct vxge_hw_mempool_dma *dma_object;
vxge_assert(mempool->memblocks_arr[i]);
vxge_assert(mempool->memblocks_dma_arr + i);
dma_object = mempool->memblocks_dma_arr + i;
for (j = 0; j < mempool->items_per_memblock; j++) {
u32 index = i * mempool->items_per_memblock + j;
/* to skip the last partially filled (if any) memblock */
if (index >= mempool->items_current)
break;
}
vfree(mempool->memblocks_priv_arr[i]);
__vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
mempool->memblock_size, dma_object);
}
vfree(mempool->items_arr);
vfree(mempool->memblocks_dma_arr);
vfree(mempool->memblocks_priv_arr);
vfree(mempool->memblocks_arr);
vfree(mempool);
}
/*
* __vxge_hw_mempool_grow
* Grows the mempool by up to %num_allocate memory blocks and reports the
* number actually added through *num_allocated.
*/
static enum vxge_hw_status
__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
u32 *num_allocated)
{
u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
u32 n_items = mempool->items_per_memblock;
u32 start_block_idx = mempool->memblocks_allocated;
u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
enum vxge_hw_status status = VXGE_HW_OK;
*num_allocated = 0;
if (end_block_idx > mempool->memblocks_max) {
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
for (i = start_block_idx; i < end_block_idx; i++) {
u32 j;
u32 is_last = ((end_block_idx - 1) == i);
struct vxge_hw_mempool_dma *dma_object =
mempool->memblocks_dma_arr + i;
void *the_memblock;
/* allocate memblock's private part. Each DMA memblock
* has a space allocated for item's private usage upon
* mempool's user request. Each time mempool grows, it will
* allocate new memblock and its private part at once.
* This helps to minimize memory usage a lot. */
mempool->memblocks_priv_arr[i] =
vzalloc(mempool->items_priv_size * n_items);
if (mempool->memblocks_priv_arr[i] == NULL) {
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
/* allocate DMA-capable memblock */
mempool->memblocks_arr[i] =
__vxge_hw_blockpool_malloc(mempool->devh,
mempool->memblock_size, dma_object);
if (mempool->memblocks_arr[i] == NULL) {
vfree(mempool->memblocks_priv_arr[i]);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
(*num_allocated)++;
mempool->memblocks_allocated++;
memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
the_memblock = mempool->memblocks_arr[i];
/* fill the items hash array */
for (j = 0; j < n_items; j++) {
u32 index = i * n_items + j;
if (first_time && index >= mempool->items_initial)
break;
mempool->items_arr[index] =
((char *)the_memblock + j*mempool->item_size);
/* let caller to do more job on each item */
if (mempool->item_func_alloc != NULL)
mempool->item_func_alloc(mempool, i,
dma_object, index, is_last);
mempool->items_current = index + 1;
}
if (first_time && mempool->items_current ==
mempool->items_initial)
break;
}
exit:
return status;
}
/*
* __vxge_hw_mempool_create
* This function creates a memory pool object. The pool may grow but will
* never shrink. The pool consists of a number of dynamically allocated
* blocks with size large enough to hold %items_initial number of items.
* Memory is DMA-able but the client must map/unmap before interoperating
* with the device.
*/
static struct vxge_hw_mempool *
__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
u32 memblock_size,
u32 item_size,
u32 items_priv_size,
u32 items_initial,
u32 items_max,
struct vxge_hw_mempool_cbs *mp_callback,
void *userdata)
{
enum vxge_hw_status status = VXGE_HW_OK;
u32 memblocks_to_allocate;
struct vxge_hw_mempool *mempool = NULL;
u32 allocated;
if (memblock_size < item_size) {
status = VXGE_HW_FAIL;
goto exit;
}
mempool = vzalloc(sizeof(struct vxge_hw_mempool));
if (mempool == NULL) {
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
mempool->devh = devh;
mempool->memblock_size = memblock_size;
mempool->items_max = items_max;
mempool->items_initial = items_initial;
mempool->item_size = item_size;
mempool->items_priv_size = items_priv_size;
mempool->item_func_alloc = mp_callback->item_func_alloc;
mempool->userdata = userdata;
mempool->memblocks_allocated = 0;
mempool->items_per_memblock = memblock_size / item_size;
mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
mempool->items_per_memblock;
/* allocate array of memblocks */
mempool->memblocks_arr =
vzalloc(sizeof(void *) * mempool->memblocks_max);
if (mempool->memblocks_arr == NULL) {
__vxge_hw_mempool_destroy(mempool);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
mempool = NULL;
goto exit;
}
/* allocate array of private parts of items per memblocks */
mempool->memblocks_priv_arr =
vzalloc(sizeof(void *) * mempool->memblocks_max);
if (mempool->memblocks_priv_arr == NULL) {
__vxge_hw_mempool_destroy(mempool);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
mempool = NULL;
goto exit;
}
/* allocate array of memblocks DMA objects */
mempool->memblocks_dma_arr =
vzalloc(sizeof(struct vxge_hw_mempool_dma) *
mempool->memblocks_max);
if (mempool->memblocks_dma_arr == NULL) {
__vxge_hw_mempool_destroy(mempool);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
mempool = NULL;
goto exit;
}
/* allocate hash array of items */
mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
if (mempool->items_arr == NULL) {
__vxge_hw_mempool_destroy(mempool);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
mempool = NULL;
goto exit;
}
/* calculate initial number of memblocks */
memblocks_to_allocate = (mempool->items_initial +
mempool->items_per_memblock - 1) /
mempool->items_per_memblock;
/* pre-allocate the mempool */
status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
&allocated);
if (status != VXGE_HW_OK) {
__vxge_hw_mempool_destroy(mempool);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
mempool = NULL;
goto exit;
}
exit:
return mempool;
}
/*
* __vxge_hw_ring_abort - Abort the ring
* This function terminates the outstanding RxDs of the ring
*/
static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
{
void *rxdh;
struct __vxge_hw_channel *channel;
channel = &ring->channel;
for (;;) {
vxge_hw_channel_dtr_try_complete(channel, &rxdh);
if (rxdh == NULL)
break;
vxge_hw_channel_dtr_complete(channel);
if (ring->rxd_term)
ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
channel->userdata);
vxge_hw_channel_dtr_free(channel, rxdh);
}
return VXGE_HW_OK;
}
/*
* __vxge_hw_ring_reset - Resets the ring
* This function resets the ring during vpath reset operation
*/
static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_channel *channel;
channel = &ring->channel;
__vxge_hw_ring_abort(ring);
status = __vxge_hw_channel_reset(channel);
if (status != VXGE_HW_OK)
goto exit;
if (ring->rxd_init) {
status = vxge_hw_ring_replenish(ring);
if (status != VXGE_HW_OK)
goto exit;
}
exit:
return status;
}
/*
* __vxge_hw_ring_delete - Removes the ring
* This function frees up the memory pool and removes the ring
*/
static enum vxge_hw_status
__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
{
struct __vxge_hw_ring *ring = vp->vpath->ringh;
__vxge_hw_ring_abort(ring);
if (ring->mempool)
__vxge_hw_mempool_destroy(ring->mempool);
vp->vpath->ringh = NULL;
__vxge_hw_channel_free(&ring->channel);
return VXGE_HW_OK;
}
/*
* __vxge_hw_ring_create - Create a Ring
* This function creates Ring and initializes it.
*/
static enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
struct vxge_hw_ring_attr *attr)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_ring *ring;
u32 ring_length;
struct vxge_hw_ring_config *config;
struct __vxge_hw_device *hldev;
u32 vp_id;
struct vxge_hw_mempool_cbs ring_mp_callback;
if ((vp == NULL) || (attr == NULL)) {
status = VXGE_HW_FAIL;
goto exit;
}
hldev = vp->vpath->hldev;
vp_id = vp->vpath->vp_id;
config = &hldev->config.vp_config[vp_id].ring;
ring_length = config->ring_blocks *
vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
VXGE_HW_CHANNEL_TYPE_RING,
ring_length,
attr->per_rxd_space,
attr->userdata);
if (ring == NULL) {
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
vp->vpath->ringh = ring;
ring->vp_id = vp_id;
ring->vp_reg = vp->vpath->vp_reg;
ring->common_reg = hldev->common_reg;
ring->stats = &vp->vpath->sw_stats->ring_stats;
ring->config = config;
ring->callback = attr->callback;
ring->rxd_init = attr->rxd_init;
ring->rxd_term = attr->rxd_term;
ring->buffer_mode = config->buffer_mode;
ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
ring->rxds_limit = config->rxds_limit;
ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
ring->rxd_priv_size =
sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
ring->per_rxd_space = attr->per_rxd_space;
ring->rxd_priv_size =
((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
/* how many RxDs can fit into one block. Depends on configured
* buffer_mode. */
ring->rxds_per_block =
vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
/* calculate actual RxD block private size */
ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
ring->mempool = __vxge_hw_mempool_create(hldev,
VXGE_HW_BLOCK_SIZE,
VXGE_HW_BLOCK_SIZE,
ring->rxdblock_priv_size,
ring->config->ring_blocks,
ring->config->ring_blocks,
&ring_mp_callback,
ring);
if (ring->mempool == NULL) {
__vxge_hw_ring_delete(vp);
return VXGE_HW_ERR_OUT_OF_MEMORY;
}
status = __vxge_hw_channel_initialize(&ring->channel);
if (status != VXGE_HW_OK) {
__vxge_hw_ring_delete(vp);
goto exit;
}
/* Note:
* Specifying rxd_init callback means two things:
* 1) rxds need to be initialized by driver at channel-open time;
* 2) rxds need to be posted at channel-open time
* (that's what the initial_replenish() below does)
* Currently we don't have a case when the 1) is done without the 2).
*/
if (ring->rxd_init) {
status = vxge_hw_ring_replenish(ring);
if (status != VXGE_HW_OK) {
__vxge_hw_ring_delete(vp);
goto exit;
}
}
/* initial replenish will increment the counter in its post() routine,
* we have to reset it */
ring->stats->common_stats.usage_cnt = 0;
exit:
return status;
}
/*
* vxge_hw_device_config_default_get - Initialize device config with defaults.
* Initialize Titan device config with default values.
*/
enum vxge_hw_status __devinit
vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
{
u32 i;
device_config->dma_blockpool_initial =
VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
device_config->rth_en = VXGE_HW_RTH_DEFAULT;
device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
device_config->vp_config[i].vp_id = i;
device_config->vp_config[i].min_bandwidth =
VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
device_config->vp_config[i].ring.ring_blocks =
VXGE_HW_DEF_RING_BLOCKS;
device_config->vp_config[i].ring.buffer_mode =
VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
device_config->vp_config[i].ring.scatter_mode =
VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
device_config->vp_config[i].ring.rxds_limit =
VXGE_HW_DEF_RING_RXDS_LIMIT;
device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
device_config->vp_config[i].fifo.fifo_blocks =
VXGE_HW_MIN_FIFO_BLOCKS;
device_config->vp_config[i].fifo.max_frags =
VXGE_HW_MAX_FIFO_FRAGS;
device_config->vp_config[i].fifo.memblock_size =
VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
device_config->vp_config[i].fifo.alignment_size =
VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
device_config->vp_config[i].fifo.intr =
VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
device_config->vp_config[i].fifo.no_snoop_bits =
VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
device_config->vp_config[i].tti.intr_enable =
VXGE_HW_TIM_INTR_DEFAULT;
device_config->vp_config[i].tti.btimer_val =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].tti.timer_ac_en =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].tti.timer_ci_en =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].tti.timer_ri_en =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].tti.rtimer_val =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].tti.util_sel =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].tti.ltimer_val =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].tti.urange_a =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].tti.uec_a =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].tti.urange_b =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].tti.uec_b =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].tti.urange_c =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].tti.uec_c =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].tti.uec_d =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].rti.intr_enable =
VXGE_HW_TIM_INTR_DEFAULT;
device_config->vp_config[i].rti.btimer_val =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].rti.timer_ac_en =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].rti.timer_ci_en =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].rti.timer_ri_en =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].rti.rtimer_val =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].rti.util_sel =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].rti.ltimer_val =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].rti.urange_a =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].rti.uec_a =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].rti.urange_b =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].rti.uec_b =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].rti.urange_c =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].rti.uec_c =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].rti.uec_d =
VXGE_HW_USE_FLASH_DEFAULT;
device_config->vp_config[i].mtu =
VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
device_config->vp_config[i].rpa_strip_vlan_tag =
VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
}
return VXGE_HW_OK;
}
/*
* __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
* Set the swapper bits appropriately for the vpath.
*/
static enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
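/*
* The byte-swap enable bit is needed only on little-endian hosts; on
* big-endian hosts the power-on default already matches, so the body
* below is compiled out under __BIG_ENDIAN.
*/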
#ifndef __BIG_ENDIAN
u64 val64;
val64 = readq(&vpath_reg->vpath_general_cfg1);
wmb();
val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
writeq(val64, &vpath_reg->vpath_general_cfg1);
wmb();
#endif
return VXGE_HW_OK;
}
/*
* __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
* Set the swapper bits appropriately for the kdfc.
*/
static enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
u64 val64;
val64 = readq(&legacy_reg->pifm_wr_swap_en);
if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
val64 = readq(&vpath_reg->kdfcctl_cfg0);
wmb();
val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
writeq(val64, &vpath_reg->kdfcctl_cfg0);
wmb();
}
return VXGE_HW_OK;
}
/*
* vxge_hw_mgmt_reg_read - Read Titan register.
*/
enum vxge_hw_status
vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
enum vxge_hw_mgmt_reg_type type,
u32 index, u32 offset, u64 *value)
{
enum vxge_hw_status status = VXGE_HW_OK;
if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
status = VXGE_HW_ERR_INVALID_DEVICE;
goto exit;
}
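/*
* Every register is 8 bytes wide, so a valid offset must leave room
* for a full 64-bit read within the selected register block; hence
* the "sizeof(...) - 8" bounds checks below.
*/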
switch (type) {
case vxge_hw_mgmt_reg_type_legacy:
if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET;
break;
}
*value = readq((void __iomem *)hldev->legacy_reg + offset);
break;
case vxge_hw_mgmt_reg_type_toc:
if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET;
break;
}
*value = readq((void __iomem *)hldev->toc_reg + offset);
break;
case vxge_hw_mgmt_reg_type_common:
if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET;
break;
}
*value = readq((void __iomem *)hldev->common_reg + offset);
break;
case vxge_hw_mgmt_reg_type_mrpcim:
if (!(hldev->access_rights &
VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
break;
}
if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET;
break;
}
*value = readq((void __iomem *)hldev->mrpcim_reg + offset);
break;
case vxge_hw_mgmt_reg_type_srpcim:
if (!(hldev->access_rights &
VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
break;
}
if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
status = VXGE_HW_ERR_INVALID_INDEX;
break;
}
if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET;
break;
}
*value = readq((void __iomem *)hldev->srpcim_reg[index] +
offset);
break;
case vxge_hw_mgmt_reg_type_vpmgmt:
if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
status = VXGE_HW_ERR_INVALID_INDEX;
break;
}
if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET;
break;
}
*value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
offset);
break;
case vxge_hw_mgmt_reg_type_vpath:
if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
status = VXGE_HW_ERR_INVALID_INDEX;
break;
}
if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET;
break;
}
*value = readq((void __iomem *)hldev->vpath_reg[index] +
offset);
break;
default:
status = VXGE_HW_ERR_INVALID_TYPE;
break;
}
exit:
return status;
}
/*
* vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
*/
enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
{
struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
enum vxge_hw_status status = VXGE_HW_OK;
int i = 0, j = 0;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (!((vpath_mask) & vxge_mBIT(i)))
continue;
vpmgmt_reg = hldev->vpmgmt_reg[i];
for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
& VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
return VXGE_HW_FAIL;
}
}
return status;
}
/*
* vxge_hw_mgmt_reg_write - Write Titan register.
*/
enum vxge_hw_status
vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
enum vxge_hw_mgmt_reg_type type,
u32 index, u32 offset, u64 value)
{
enum vxge_hw_status status = VXGE_HW_OK;
if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
status = VXGE_HW_ERR_INVALID_DEVICE;
goto exit;
}
switch (type) {
case vxge_hw_mgmt_reg_type_legacy:
if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET;
break;
}
writeq(value, (void __iomem *)hldev->legacy_reg + offset);
break;
case vxge_hw_mgmt_reg_type_toc:
if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET;
break;
}
writeq(value, (void __iomem *)hldev->toc_reg + offset);
break;
case vxge_hw_mgmt_reg_type_common:
if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET;
break;
}
writeq(value, (void __iomem *)hldev->common_reg + offset);
break;
case vxge_hw_mgmt_reg_type_mrpcim:
if (!(hldev->access_rights &
VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
break;
}
if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET;
break;
}
writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
break;
case vxge_hw_mgmt_reg_type_srpcim:
if (!(hldev->access_rights &
VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
break;
}
if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
status = VXGE_HW_ERR_INVALID_INDEX;
break;
}
if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET;
break;
}
writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
offset);
break;
case vxge_hw_mgmt_reg_type_vpmgmt:
if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
status = VXGE_HW_ERR_INVALID_INDEX;
break;
}
if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET;
break;
}
writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
offset);
break;
case vxge_hw_mgmt_reg_type_vpath:
if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
status = VXGE_HW_ERR_INVALID_INDEX;
break;
}
if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
status = VXGE_HW_ERR_INVALID_OFFSET;
break;
}
writeq(value, (void __iomem *)hldev->vpath_reg[index] +
offset);
break;
default:
status = VXGE_HW_ERR_INVALID_TYPE;
break;
}
exit:
return status;
}
/*
* __vxge_hw_fifo_abort - Terminate all outstanding TxDs
* This function completes and frees every TxD still posted to the fifo
*/
static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
{
void *txdlh;
for (;;) {
vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
if (txdlh == NULL)
break;
vxge_hw_channel_dtr_complete(&fifo->channel);
if (fifo->txdl_term) {
fifo->txdl_term(txdlh,
VXGE_HW_TXDL_STATE_POSTED,
fifo->channel.userdata);
}
vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
}
return VXGE_HW_OK;
}
/*
* __vxge_hw_fifo_reset - Resets the fifo
* This function resets the fifo during vpath reset operation
*/
static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
{
enum vxge_hw_status status = VXGE_HW_OK;
__vxge_hw_fifo_abort(fifo);
status = __vxge_hw_channel_reset(&fifo->channel);
return status;
}
/*
* __vxge_hw_fifo_delete - Removes the FIFO
* This function frees up the memory pool and removes the FIFO
*/
static enum vxge_hw_status
__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
{
struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
__vxge_hw_fifo_abort(fifo);
if (fifo->mempool)
__vxge_hw_mempool_destroy(fifo->mempool);
vp->vpath->fifoh = NULL;
__vxge_hw_channel_free(&fifo->channel);
return VXGE_HW_OK;
}
/*
* __vxge_hw_fifo_mempool_item_alloc - TxD list block initialization
* callback
* This function is the callback passed to __vxge_hw_mempool_create when
* building the memory pool for the TxD lists
*/
static void
__vxge_hw_fifo_mempool_item_alloc(
struct vxge_hw_mempool *mempoolh,
u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
u32 index, u32 is_last)
{
u32 memblock_item_idx;
struct __vxge_hw_fifo_txdl_priv *txdl_priv;
struct vxge_hw_fifo_txd *txdp =
(struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
struct __vxge_hw_fifo *fifo =
(struct __vxge_hw_fifo *)mempoolh->userdata;
void *memblock = mempoolh->memblocks_arr[memblock_index];
vxge_assert(txdp);
txdp->host_control = (u64) (size_t)
__vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
&memblock_item_idx);
txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
vxge_assert(txdl_priv);
fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
/* pre-format the TxDL's HW-private area */
txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
txdl_priv->dma_handle = dma_object->handle;
txdl_priv->memblock = memblock;
txdl_priv->first_txdp = txdp;
txdl_priv->next_txdl_priv = NULL;
txdl_priv->alloc_frags = 0;
}
/*
* __vxge_hw_fifo_create - Create a FIFO
* This function creates FIFO and initializes it.
*/
static enum vxge_hw_status
__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
struct vxge_hw_fifo_attr *attr)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_fifo *fifo;
struct vxge_hw_fifo_config *config;
u32 txdl_size, txdl_per_memblock;
struct vxge_hw_mempool_cbs fifo_mp_callback;
struct __vxge_hw_virtualpath *vpath;
if ((vp == NULL) || (attr == NULL)) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
vpath = vp->vpath;
config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
txdl_per_memblock = config->memblock_size / txdl_size;
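/*
* Illustrative sizing (values assumed for the example): with 8 kB
* memblocks, max_frags = 64 and a 32-byte TxD, txdl_size = 2048 and
* four TxDLs fit per memblock.
*/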
fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
VXGE_HW_CHANNEL_TYPE_FIFO,
config->fifo_blocks * txdl_per_memblock,
attr->per_txdl_space, attr->userdata);
if (fifo == NULL) {
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
vpath->fifoh = fifo;
fifo->nofl_db = vpath->nofl_db;
fifo->vp_id = vpath->vp_id;
fifo->vp_reg = vpath->vp_reg;
fifo->stats = &vpath->sw_stats->fifo_stats;
fifo->config = config;
/* apply "interrupts per txdl" attribute */
fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
if (fifo->config->intr)
fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
fifo->no_snoop_bits = config->no_snoop_bits;
/*
* FIFO memory management strategy:
*
* TxDL split into three independent parts:
* - set of TxD's
* - TxD HW private part
* - driver private part
*
* Adaptive memory allocation is used, i.e. memory is allocated on
* demand, sized to fit into one memory block.
* One memory block may contain more than one TxDL.
*
* During "reserve" operations more memory can be allocated on demand
* for example due to FIFO full condition.
*
* The pool of memblocks never shrinks, except in the __vxge_hw_fifo_close
* routine, which essentially stops the channel and frees its resources.
*/
/* TxDL common private size == TxDL private + driver private */
fifo->priv_size =
sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
fifo->per_txdl_space = attr->per_txdl_space;
/* TxDL size, in bytes, covering max_frags descriptors per list */
fifo->txdl_size = txdl_size;
fifo->txdl_per_memblock = txdl_per_memblock;
fifo->txdl_term = attr->txdl_term;
fifo->callback = attr->callback;
if (fifo->txdl_per_memblock == 0) {
__vxge_hw_fifo_delete(vp);
status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
goto exit;
}
fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
fifo->mempool =
__vxge_hw_mempool_create(vpath->hldev,
fifo->config->memblock_size,
fifo->txdl_size,
fifo->priv_size,
(fifo->config->fifo_blocks * fifo->txdl_per_memblock),
(fifo->config->fifo_blocks * fifo->txdl_per_memblock),
&fifo_mp_callback,
fifo);
if (fifo->mempool == NULL) {
__vxge_hw_fifo_delete(vp);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
status = __vxge_hw_channel_initialize(&fifo->channel);
if (status != VXGE_HW_OK) {
__vxge_hw_fifo_delete(vp);
goto exit;
}
vxge_assert(fifo->channel.reserve_ptr);
exit:
return status;
}
/*
* __vxge_hw_vpath_pci_read - Read the content of given address
* in pci config space.
* Read from the vpath pci config space.
*/
static enum vxge_hw_status
__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
u32 phy_func_0, u32 offset, u32 *val)
{
u64 val64;
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
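/*
* Indirect config-space access handshake: program the target offset
* into cfg1, pulse the request bit in cfg2, poll for completion and
* then pick the data out of the status register.
*/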
val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
if (phy_func_0)
val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
writeq(val64, &vp_reg->pci_config_access_cfg1);
wmb();
writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
&vp_reg->pci_config_access_cfg2);
wmb();
status = __vxge_hw_device_register_poll(
&vp_reg->pci_config_access_cfg2,
VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
if (status != VXGE_HW_OK)
goto exit;
val64 = readq(&vp_reg->pci_config_access_status);
if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
status = VXGE_HW_FAIL;
*val = 0;
} else
*val = (u32)vxge_bVALn(val64, 32, 32);
exit:
return status;
}
/**
* vxge_hw_device_flick_link_led - Flick (blink) link LED.
* @hldev: HW device.
* @on_off: TRUE if flickering to be on, FALSE to be off
*
* Flicker the link LED.
*/
enum vxge_hw_status
vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
{
struct __vxge_hw_virtualpath *vpath;
u64 data0, data1 = 0, steer_ctrl = 0;
enum vxge_hw_status status;
if (hldev == NULL) {
status = VXGE_HW_ERR_INVALID_DEVICE;
goto exit;
}
vpath = &hldev->virtual_paths[hldev->first_vp_id];
data0 = on_off;
status = vxge_hw_vpath_fw_api(vpath,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
0, &data0, &data1, &steer_ctrl);
exit:
return status;
}
/*
* __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
*/
enum vxge_hw_status
__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
u32 action, u32 rts_table, u32 offset,
u64 *data0, u64 *data1)
{
enum vxge_hw_status status;
u64 steer_ctrl = 0;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
if ((rts_table ==
VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
(rts_table ==
VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
(rts_table ==
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
(rts_table ==
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
}
status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
data0, data1, &steer_ctrl);
if (status != VXGE_HW_OK)
goto exit;
if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
(rts_table !=
VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
*data1 = 0;
exit:
return status;
}
/*
* __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
*/
enum vxge_hw_status
__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
u32 rts_table, u32 offset, u64 steer_data0,
u64 steer_data1)
{
u64 data0, data1 = 0, steer_ctrl = 0;
enum vxge_hw_status status;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
data0 = steer_data0;
if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
(rts_table ==
VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
data1 = steer_data1;
status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
&data0, &data1, &steer_ctrl);
exit:
return status;
}
/*
* vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
*/
enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
struct __vxge_hw_vpath_handle *vp,
enum vxge_hw_rth_algoritms algorithm,
struct vxge_hw_rth_hash_types *hash_type,
u16 bucket_size)
{
u64 data0, data1;
enum vxge_hw_status status = VXGE_HW_OK;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
status = __vxge_hw_vpath_rts_table_get(vp,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
0, &data0, &data1);
if (status != VXGE_HW_OK)
goto exit;
data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
if (hash_type->hash_type_tcpipv4_en)
data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
if (hash_type->hash_type_ipv4_en)
data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
if (hash_type->hash_type_tcpipv6_en)
data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
if (hash_type->hash_type_ipv6_en)
data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
if (hash_type->hash_type_tcpipv6ex_en)
data0 |=
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
if (hash_type->hash_type_ipv6ex_en)
data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
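/*
* Toggle the ACTIVE_TABLE bit: the hardware holds two tables and
* flipping the bit makes it switch to the alternate one, picking up
* the newly written configuration.
*/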
if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
else
data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
status = __vxge_hw_vpath_rts_table_set(vp,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
0, data0, 0);
exit:
return status;
}
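/*
* vxge_hw_rts_rth_data0_data1_get - Pack one indirection table entry.
* An RTS write carries up to four IT entries: two in data0 (ITEM0 and
* ITEM1) and two in data1. @flag (1..4) selects which slot receives
* bucket @j. Each case returns on its own; falling through would merge
* bucket numbers from different j values into one field.
*/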
static void
vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
u16 flag, u8 *itable)
{
switch (flag) {
case 1:
*data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
itable[j]);
return;
case 2:
*data0 |=
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
itable[j]);
return;
case 3:
*data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
itable[j]);
return;
case 4:
*data1 |=
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
itable[j]);
return;
default:
return;
}
}
/*
* vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
*/
enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
struct __vxge_hw_vpath_handle **vpath_handles,
u32 vpath_count,
u8 *mtable,
u8 *itable,
u32 itable_size)
{
u32 i, j, action, rts_table;
u64 data0;
u64 data1;
u32 max_entries;
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
max_entries = (((u32)1) << itable_size);
if (vp->vpath->hldev->config.rth_it_type
== VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
rts_table =
VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
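/*
* Two passes: first program every bucket's data (ENTRY_EN clear)
* through vpath 0, then re-write each entry with ENTRY_EN set via
* the vpath that mtable assigns to it.
*/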
for (j = 0; j < max_entries; j++) {
data1 = 0;
data0 =
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
itable[j]);
status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
action, rts_table, j, data0, data1);
if (status != VXGE_HW_OK)
goto exit;
}
for (j = 0; j < max_entries; j++) {
data1 = 0;
data0 =
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
itable[j]);
status = __vxge_hw_vpath_rts_table_set(
vpath_handles[mtable[itable[j]]], action,
rts_table, j, data0, data1);
if (status != VXGE_HW_OK)
goto exit;
}
} else {
action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
rts_table =
VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
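/*
* Up to four IT entries are packed per write: two in data0 and two
* in data1. Each of the four scan loops below picks the next bucket
* owned by vpath i and drops it into one slot (flags 1..4).
*/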
for (i = 0; i < vpath_count; i++) {
for (j = 0; j < max_entries;) {
data0 = 0;
data1 = 0;
while (j < max_entries) {
if (mtable[itable[j]] != i) {
j++;
continue;
}
vxge_hw_rts_rth_data0_data1_get(j,
&data0, &data1, 1, itable);
j++;
break;
}
while (j < max_entries) {
if (mtable[itable[j]] != i) {
j++;
continue;
}
vxge_hw_rts_rth_data0_data1_get(j,
&data0, &data1, 2, itable);
j++;
break;
}
while (j < max_entries) {
if (mtable[itable[j]] != i) {
j++;
continue;
}
vxge_hw_rts_rth_data0_data1_get(j,
&data0, &data1, 3, itable);
j++;
break;
}
while (j < max_entries) {
if (mtable[itable[j]] != i) {
j++;
continue;
}
vxge_hw_rts_rth_data0_data1_get(j,
&data0, &data1, 4, itable);
j++;
break;
}
if (data0 != 0) {
status = __vxge_hw_vpath_rts_table_set(
vpath_handles[i],
action, rts_table,
0, data0, data1);
if (status != VXGE_HW_OK)
goto exit;
}
}
}
}
exit:
return status;
}
/**
* vxge_hw_vpath_check_leak - Check for memory leak
* @ringh: Handle to the ring object used for receive
*
* If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is greater than or equal to
* PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
* Returns: VXGE_HW_FAIL, if leak has occurred.
*
*/
enum vxge_hw_status
vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
{
enum vxge_hw_status status = VXGE_HW_OK;
u64 rxd_new_count, rxd_spat;
if (ring == NULL)
return status;
rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
rxd_spat = readq(&ring->vp_reg->prc_cfg6);
rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
if (rxd_new_count >= rxd_spat)
status = VXGE_HW_FAIL;
return status;
}
/*
* __vxge_hw_vpath_mgmt_read
* This routine reads the vpath_mgmt registers
*/
static enum vxge_hw_status
__vxge_hw_vpath_mgmt_read(
struct __vxge_hw_device *hldev,
struct __vxge_hw_virtualpath *vpath)
{
u32 i, mtu = 0, max_pyld = 0;
u64 val64;
enum vxge_hw_status status = VXGE_HW_OK;
for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
val64 = readq(&vpath->vpmgmt_reg->
rxmac_cfg0_port_vpmgmt_clone[i]);
max_pyld =
(u32)
VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
(val64);
if (mtu < max_pyld)
mtu = max_pyld;
}
vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (val64 & vxge_mBIT(i))
vpath->vsport_number = i;
}
val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
else
VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
return status;
}
/*
* __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
* This routine checks the vpath_rst_in_prog register to see if the
* adapter has completed the reset process for the vpath
*/
static enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
{
enum vxge_hw_status status;
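/*
* vpath N is tracked by bit (16 - N) of vpath_rst_in_prog,
* i.e. vp_id 0 maps to bit 16.
*/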
status = __vxge_hw_device_register_poll(
&vpath->hldev->common_reg->vpath_rst_in_prog,
VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
1 << (16 - vpath->vp_id)),
vpath->hldev->config.device_poll_millis);
return status;
}
/*
* __vxge_hw_vpath_reset
* This routine resets the vpath on the device
*/
static enum vxge_hw_status
__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
enum vxge_hw_status status = VXGE_HW_OK;
val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
&hldev->common_reg->cmn_rsthdlr_cfg0);
return status;
}
/*
* __vxge_hw_vpath_sw_reset
* This routine resets the vpath structures
*/
static enum vxge_hw_status
__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_virtualpath *vpath;
vpath = &hldev->virtual_paths[vp_id];
if (vpath->ringh) {
status = __vxge_hw_ring_reset(vpath->ringh);
if (status != VXGE_HW_OK)
goto exit;
}
if (vpath->fifoh)
status = __vxge_hw_fifo_reset(vpath->fifoh);
exit:
return status;
}
/*
* __vxge_hw_vpath_prc_configure
* This routine configures the prc registers of virtual path using the config
* passed
*/
static void
__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
struct __vxge_hw_virtualpath *vpath;
struct vxge_hw_vp_config *vp_config;
struct vxge_hw_vpath_reg __iomem *vp_reg;
vpath = &hldev->virtual_paths[vp_id];
vp_reg = vpath->vp_reg;
vp_config = vpath->vp_config;
if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
return;
val64 = readq(&vp_reg->prc_cfg1);
val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
writeq(val64, &vp_reg->prc_cfg1);
val64 = readq(&vpath->vp_reg->prc_cfg6);
val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
writeq(val64, &vpath->vp_reg->prc_cfg6);
val64 = readq(&vp_reg->prc_cfg7);
if (vpath->vp_config->ring.scatter_mode !=
VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
switch (vpath->vp_config->ring.scatter_mode) {
case VXGE_HW_RING_SCATTER_MODE_A:
val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
break;
case VXGE_HW_RING_SCATTER_MODE_B:
val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
break;
case VXGE_HW_RING_SCATTER_MODE_C:
val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
break;
}
}
writeq(val64, &vp_reg->prc_cfg7);
writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
__vxge_hw_ring_first_block_address_get(
vpath->ringh) >> 3), &vp_reg->prc_cfg5);
val64 = readq(&vp_reg->prc_cfg4);
val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
else
val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
writeq(val64, &vp_reg->prc_cfg4);
}
/*
* __vxge_hw_vpath_kdfc_configure
* This routine configures the kdfc registers of virtual path using the
* config passed
*/
static enum vxge_hw_status
__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
u64 vpath_stride;
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_virtualpath *vpath;
struct vxge_hw_vpath_reg __iomem *vp_reg;
vpath = &hldev->virtual_paths[vp_id];
vp_reg = vpath->vp_reg;
status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
if (status != VXGE_HW_OK)
goto exit;
val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
vpath->max_kdfc_db =
(u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
val64+1)/2;
if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
vpath->max_nofl_db = vpath->max_kdfc_db;
if (vpath->max_nofl_db <
((vpath->vp_config->fifo.memblock_size /
(vpath->vp_config->fifo.max_frags *
sizeof(struct vxge_hw_fifo_txd))) *
vpath->vp_config->fifo.fifo_blocks)) {
return VXGE_HW_BADCFG_FIFO_BLOCKS;
}
val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
(vpath->max_nofl_db*2)-1);
}
writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
&vp_reg->kdfc_fifo_trpl_ctrl);
val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
#ifndef __BIG_ENDIAN
VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
#endif
VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
wmb();
vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
vpath->nofl_db =
(struct __vxge_hw_non_offload_db_wrapper __iomem *)
(hldev->kdfc + (vp_id *
VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
vpath_stride)));
exit:
return status;
}
/*
* __vxge_hw_vpath_mac_configure
* This routine configures the mac of virtual path using the config passed
*/
static enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_virtualpath *vpath;
struct vxge_hw_vp_config *vp_config;
struct vxge_hw_vpath_reg __iomem *vp_reg;
vpath = &hldev->virtual_paths[vp_id];
vp_reg = vpath->vp_reg;
vp_config = vpath->vp_config;
writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
vpath->vsport_number), &vp_reg->xmac_vsport_choice);
if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
val64 = readq(&vp_reg->xmac_rpa_vcfg);
if (vp_config->rpa_strip_vlan_tag !=
VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
if (vp_config->rpa_strip_vlan_tag)
val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
else
val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
}
writeq(val64, &vp_reg->xmac_rpa_vcfg);
val64 = readq(&vp_reg->rxmac_vcfg0);
if (vp_config->mtu !=
VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
if ((vp_config->mtu +
VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
vp_config->mtu +
VXGE_HW_MAC_HEADER_MAX_SIZE);
else
val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
vpath->max_mtu);
}
writeq(val64, &vp_reg->rxmac_vcfg0);
val64 = readq(&vp_reg->rxmac_vcfg1);
val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
if (hldev->config.rth_it_type ==
VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
0x2) |
VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
}
writeq(val64, &vp_reg->rxmac_vcfg1);
}
return status;
}
/*
* __vxge_hw_vpath_tim_configure
* This routine configures the tim registers of virtual path using the config
* passed
*/
static enum vxge_hw_status
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_virtualpath *vpath;
struct vxge_hw_vpath_reg __iomem *vp_reg;
struct vxge_hw_vp_config *config;
vpath = &hldev->virtual_paths[vp_id];
vp_reg = vpath->vp_reg;
config = vpath->vp_config;
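/*
* Throughout this routine, any field still equal to
* VXGE_HW_USE_FLASH_DEFAULT is skipped, preserving the value that
* was programmed from flash/firmware.
*/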
writeq(0, &vp_reg->tim_dest_addr);
writeq(0, &vp_reg->tim_vpath_map);
writeq(0, &vp_reg->tim_bitmap);
writeq(0, &vp_reg->tim_remap);
if (config->ring.enable == VXGE_HW_RING_ENABLE)
writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
(vp_id * VXGE_HW_MAX_INTR_PER_VP) +
VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
val64 = readq(&vp_reg->tim_pci_cfg);
val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
writeq(val64, &vp_reg->tim_pci_cfg);
if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
0x3ffffff);
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
config->tti.btimer_val);
}
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
if (config->tti.timer_ac_en)
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
else
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
}
if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
if (config->tti.timer_ci_en)
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
else
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
}
if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
config->tti.urange_a);
}
if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
config->tti.urange_b);
}
if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
config->tti.urange_c);
}
writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
vpath->tim_tti_cfg1_saved = val64;
val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
config->tti.uec_a);
}
if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
config->tti.uec_b);
}
if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
config->tti.uec_c);
}
if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
config->tti.uec_d);
}
writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
if (config->tti.timer_ri_en)
val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
else
val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
}
if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
0x3ffffff);
val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
config->tti.rtimer_val);
}
if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
}
if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
0x3ffffff);
val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
config->tti.ltimer_val);
}
writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
vpath->tim_tti_cfg3_saved = val64;
}
if (config->ring.enable == VXGE_HW_RING_ENABLE) {
val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
0x3ffffff);
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
config->rti.btimer_val);
}
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
if (config->rti.timer_ac_en)
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
else
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
}
if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
if (config->rti.timer_ci_en)
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
else
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
}
if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
config->rti.urange_a);
}
if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
config->rti.urange_b);
}
if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
config->rti.urange_c);
}
writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
vpath->tim_rti_cfg1_saved = val64;
val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
config->rti.uec_a);
}
if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
config->rti.uec_b);
}
if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
config->rti.uec_c);
}
if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
config->rti.uec_d);
}
writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
if (config->rti.timer_ri_en)
val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
else
val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
}
if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
0x3ffffff);
val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
config->rti.rtimer_val);
}
if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
}
if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
0x3ffffff);
val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
config->rti.ltimer_val);
}
writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
vpath->tim_rti_cfg3_saved = val64;
}
val64 = 0;
writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
writeq(val64, &vp_reg->tim_wrkld_clc);
return status;
}
/*
* __vxge_hw_vpath_initialize
* This routine is the final phase of init which initializes the
* registers of the vpath using the configuration passed.
*/
static enum vxge_hw_status
__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
u32 val32;
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_virtualpath *vpath;
struct vxge_hw_vpath_reg __iomem *vp_reg;
vpath = &hldev->virtual_paths[vp_id];
if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
goto exit;
}
vp_reg = vpath->vp_reg;
status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
if (status != VXGE_HW_OK)
goto exit;
status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
if (status != VXGE_HW_OK)
goto exit;
status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
if (status != VXGE_HW_OK)
goto exit;
status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
if (status != VXGE_HW_OK)
goto exit;
val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
/* Get MRRS (max read request size, DEVCTL bits 14:12) from device control */
status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
if (status == VXGE_HW_OK) {
val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
val64 &=
~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
val64 |=
VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
}
val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
val64 |=
VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
VXGE_HW_MAX_PAYLOAD_SIZE_512);
val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
exit:
return status;
}
/*
* __vxge_hw_vp_terminate - Terminate Virtual Path structure
* This routine closes all channels it opened and frees up memory
*/
static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
{
struct __vxge_hw_virtualpath *vpath;
vpath = &hldev->virtual_paths[vp_id];
if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
goto exit;
VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
vpath->hldev->tim_int_mask1, vpath->vp_id);
hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
/* If the whole struct __vxge_hw_virtualpath is zeroed, nothing will
* work after the interface is brought down.
*/
spin_lock(&vpath->lock);
vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
spin_unlock(&vpath->lock);
vpath->vpmgmt_reg = NULL;
vpath->nofl_db = NULL;
vpath->max_mtu = 0;
vpath->vsport_number = 0;
vpath->max_kdfc_db = 0;
vpath->max_nofl_db = 0;
vpath->ringh = NULL;
vpath->fifoh = NULL;
memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
vpath->stats_block = NULL;
vpath->hw_stats = NULL;
vpath->hw_stats_sav = NULL;
vpath->sw_stats = NULL;
exit:
return;
}
/*
* __vxge_hw_vp_initialize - Initialize Virtual Path structure
* This routine is the initial phase of init which resets the vpath and
* initializes the software support structures.
*/
static enum vxge_hw_status
__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
struct vxge_hw_vp_config *config)
{
struct __vxge_hw_virtualpath *vpath;
enum vxge_hw_status status = VXGE_HW_OK;
if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
goto exit;
}
vpath = &hldev->virtual_paths[vp_id];
spin_lock_init(&vpath->lock);
vpath->vp_id = vp_id;
vpath->vp_open = VXGE_HW_VP_OPEN;
vpath->hldev = hldev;
vpath->vp_config = config;
vpath->vp_reg = hldev->vpath_reg[vp_id];
vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
__vxge_hw_vpath_reset(hldev, vp_id);
status = __vxge_hw_vpath_reset_check(vpath);
if (status != VXGE_HW_OK) {
memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
goto exit;
}
status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
if (status != VXGE_HW_OK) {
memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
goto exit;
}
INIT_LIST_HEAD(&vpath->vpath_handles);
vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
hldev->tim_int_mask1, vp_id);
status = __vxge_hw_vpath_initialize(hldev, vp_id);
if (status != VXGE_HW_OK)
__vxge_hw_vp_terminate(hldev, vp_id);
exit:
return status;
}
/*
* vxge_hw_vpath_mtu_set - Set MTU.
* Set a new MTU value. For example, to use jumbo frames:
* vxge_hw_vpath_mtu_set(my_device, 9600);
*/
enum vxge_hw_status
vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
{
u64 val64;
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_virtualpath *vpath;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
vpath = vp->vpath;
new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
/* bail out rather than programming an invalid frame length */
status = VXGE_HW_ERR_INVALID_MTU_SIZE;
goto exit;
}
val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
exit:
return status;
}
/*
* vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
* Enable the DMA vpath statistics. Call this function to re-enable the
* adapter to update stats into the host memory
*/
static enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_virtualpath *vpath;
vpath = vp->vpath;
if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
status = VXGE_HW_ERR_VPATH_NOT_OPEN;
goto exit;
}
memcpy(vpath->hw_stats_sav, vpath->hw_stats,
sizeof(struct vxge_hw_vpath_stats_hw_info));
status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
exit:
return status;
}
/*
* __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
* This function hands out a pool-sized block from the free list and
* then replenishes the pool
*/
static struct __vxge_hw_blockpool_entry *
__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
{
struct __vxge_hw_blockpool_entry *entry = NULL;
struct __vxge_hw_blockpool *blockpool;
blockpool = &devh->block_pool;
if (size == blockpool->block_size) {
if (!list_empty(&blockpool->free_block_list))
entry = (struct __vxge_hw_blockpool_entry *)
list_first_entry(&blockpool->free_block_list,
struct __vxge_hw_blockpool_entry,
item);
if (entry != NULL) {
list_del(&entry->item);
blockpool->pool_size--;
}
}
if (entry != NULL)
__vxge_hw_blockpool_blocks_add(blockpool);
return entry;
}
/*
* vxge_hw_vpath_open - Open a virtual path on a given adapter
* This function is used to open access to a virtual path of an
* adapter for offload and GRO operations. This function returns
* synchronously.
*/
enum vxge_hw_status
vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
struct vxge_hw_vpath_attr *attr,
struct __vxge_hw_vpath_handle **vpath_handle)
{
struct __vxge_hw_virtualpath *vpath;
struct __vxge_hw_vpath_handle *vp;
enum vxge_hw_status status;
vpath = &hldev->virtual_paths[attr->vp_id];
if (vpath->vp_open == VXGE_HW_VP_OPEN) {
status = VXGE_HW_ERR_INVALID_STATE;
goto vpath_open_exit1;
}
status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
&hldev->config.vp_config[attr->vp_id]);
if (status != VXGE_HW_OK)
goto vpath_open_exit1;
vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
if (vp == NULL) {
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto vpath_open_exit2;
}
vp->vpath = vpath;
if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
if (status != VXGE_HW_OK)
goto vpath_open_exit6;
}
if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
status = __vxge_hw_ring_create(vp, &attr->ring_attr);
if (status != VXGE_HW_OK)
goto vpath_open_exit7;
__vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
}
vpath->fifoh->tx_intr_num =
(attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
VXGE_HW_VPATH_INTR_TX;
vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
VXGE_HW_BLOCK_SIZE);
if (vpath->stats_block == NULL) {
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto vpath_open_exit8;
}
vpath->hw_stats = vpath->stats_block->memblock;
memset(vpath->hw_stats, 0,
sizeof(struct vxge_hw_vpath_stats_hw_info));
hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
vpath->hw_stats;
vpath->hw_stats_sav =
&hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
memset(vpath->hw_stats_sav, 0,
sizeof(struct vxge_hw_vpath_stats_hw_info));
writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
status = vxge_hw_vpath_stats_enable(vp);
if (status != VXGE_HW_OK)
goto vpath_open_exit8;
list_add(&vp->item, &vpath->vpath_handles);
hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
*vpath_handle = vp;
attr->fifo_attr.userdata = vpath->fifoh;
attr->ring_attr.userdata = vpath->ringh;
return VXGE_HW_OK;
vpath_open_exit8:
if (vpath->ringh != NULL)
__vxge_hw_ring_delete(vp);
vpath_open_exit7:
if (vpath->fifoh != NULL)
__vxge_hw_fifo_delete(vp);
vpath_open_exit6:
vfree(vp);
vpath_open_exit2:
__vxge_hw_vp_terminate(hldev, attr->vp_id);
vpath_open_exit1:
return status;
}
/**
* vxge_hw_vpath_rx_doorbell_init - Post the initial RxD doorbell count
* @vp: Handle got from previous vpath open
*
* This function posts the count of RxD qwords initially available to
* the PRC doorbell and trims ring->rxds_limit to what the hardware can
* actually consume.
*/
void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
{
struct __vxge_hw_virtualpath *vpath = vp->vpath;
struct __vxge_hw_ring *ring = vpath->ringh;
struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
u64 new_count, val64, val164;
if (vdev->titan1) {
new_count = readq(&vpath->vp_reg->rxdmem_size);
new_count &= 0x1fff;
} else
new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
&vpath->vp_reg->prc_rxd_doorbell);
readl(&vpath->vp_reg->prc_rxd_doorbell);
val164 /= 2;
val64 = readq(&vpath->vp_reg->prc_cfg6);
val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
val64 &= 0x1ff;
/*
* Each RxD occupies 4 qwords, so the qword counts are divided by 4
* below to yield RxD counts
*/
new_count -= (val64 + 1);
val64 = min(val164, new_count) / 4;
ring->rxds_limit = min(ring->rxds_limit, val64);
if (ring->rxds_limit < 4)
ring->rxds_limit = 4;
}
/*
* __vxge_hw_blockpool_block_free - Frees a block from block pool
* @devh: Hal device
* @entry: Entry of block to be freed
*
* This function frees a block from block pool
*/
static void
__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
struct __vxge_hw_blockpool_entry *entry)
{
struct __vxge_hw_blockpool *blockpool;
blockpool = &devh->block_pool;
if (entry->length == blockpool->block_size) {
list_add(&entry->item, &blockpool->free_block_list);
blockpool->pool_size++;
}
__vxge_hw_blockpool_blocks_remove(blockpool);
}
/*
* vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
* This function is used to close access to a virtual path opened
* earlier.
*/
enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
{
struct __vxge_hw_virtualpath *vpath = NULL;
struct __vxge_hw_device *devh = NULL;
u32 vp_id = vp->vpath->vp_id;
u32 is_empty = TRUE;
enum vxge_hw_status status = VXGE_HW_OK;
vpath = vp->vpath;
devh = vpath->hldev;
if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
status = VXGE_HW_ERR_VPATH_NOT_OPEN;
goto vpath_close_exit;
}
list_del(&vp->item);
if (!list_empty(&vpath->vpath_handles)) {
list_add(&vp->item, &vpath->vpath_handles);
is_empty = FALSE;
}
if (!is_empty) {
status = VXGE_HW_FAIL;
goto vpath_close_exit;
}
devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
if (vpath->ringh != NULL)
__vxge_hw_ring_delete(vp);
if (vpath->fifoh != NULL)
__vxge_hw_fifo_delete(vp);
if (vpath->stats_block != NULL)
__vxge_hw_blockpool_block_free(devh, vpath->stats_block);
vfree(vp);
__vxge_hw_vp_terminate(devh, vp_id);
vpath_close_exit:
return status;
}
/*
* vxge_hw_vpath_reset - Resets vpath
* This function is used to request a reset of vpath
*/
enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
{
enum vxge_hw_status status;
u32 vp_id;
struct __vxge_hw_virtualpath *vpath = vp->vpath;
vp_id = vpath->vp_id;
if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
status = VXGE_HW_ERR_VPATH_NOT_OPEN;
goto exit;
}
status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
if (status == VXGE_HW_OK)
vpath->sw_stats->soft_reset_cnt++;
exit:
return status;
}
/*
* vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
* This function polls for the vpath reset completion and re-initializes
* the vpath.
*/
enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
{
struct __vxge_hw_virtualpath *vpath = NULL;
enum vxge_hw_status status;
struct __vxge_hw_device *hldev;
u32 vp_id;
vp_id = vp->vpath->vp_id;
vpath = vp->vpath;
hldev = vpath->hldev;
if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
status = VXGE_HW_ERR_VPATH_NOT_OPEN;
goto exit;
}
status = __vxge_hw_vpath_reset_check(vpath);
if (status != VXGE_HW_OK)
goto exit;
status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
if (status != VXGE_HW_OK)
goto exit;
status = __vxge_hw_vpath_initialize(hldev, vp_id);
if (status != VXGE_HW_OK)
goto exit;
if (vpath->ringh != NULL)
__vxge_hw_vpath_prc_configure(hldev, vp_id);
memset(vpath->hw_stats, 0,
sizeof(struct vxge_hw_vpath_stats_hw_info));
memset(vpath->hw_stats_sav, 0,
sizeof(struct vxge_hw_vpath_stats_hw_info));
writeq(vpath->stats_block->dma_addr,
&vpath->vp_reg->stats_cfg);
status = vxge_hw_vpath_stats_enable(vp);
exit:
return status;
}
/*
* vxge_hw_vpath_enable - Enable vpath.
* This routine clears the vpath reset thereby enabling a vpath
* to start forwarding frames and generating interrupts.
*/
void
vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
{
struct __vxge_hw_device *hldev;
u64 val64;
hldev = vp->vpath->hldev;
val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
1 << (16 - vp->vpath->vp_id));
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
&hldev->common_reg->cmn_rsthdlr_cfg1);
}
| gpl-2.0 |
davem330/net | drivers/ata/sata_mv.c | 387 | 122443 | /*
* sata_mv.c - Marvell SATA support
*
* Copyright 2008-2009: Marvell Corporation, all rights reserved.
* Copyright 2005: EMC Corporation, all rights reserved.
* Copyright 2005 Red Hat, Inc. All rights reserved.
*
* Originally written by Brett Russ.
* Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
*
* Please ALWAYS copy linux-ide@vger.kernel.org on emails.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/*
* sata_mv TODO list:
*
* --> Develop a low-power-consumption strategy, and implement it.
*
* --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
*
* --> [Experiment, Marvell value added] Is it possible to use target
* mode to cross-connect two Linux boxes with Marvell cards? If so,
* creating LibATA target mode support would be very interesting.
*
* Target mode, for those without docs, is the ability to directly
* connect two SATA ports.
*/
/*
* 80x1-B2 errata PCI#11:
*
* Users of the 6041/6081 Rev.B2 chips (current is C0)
* should be careful to insert those cards only onto PCI-X bus #0,
* and only in device slots 0..7, not higher. The chips may not
* work correctly otherwise (note: this is a pretty rare condition).
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>
#define DRV_NAME "sata_mv"
#define DRV_VERSION "1.28"
/*
* module options
*/
static int msi;
#ifdef CONFIG_PCI
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif
static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
"IRQ coalescing I/O count threshold (0..255)");
static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
"IRQ coalescing time threshold in usecs");
enum {
/* BAR's are enumerated in terms of pci_resource_start() terms */
MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
MV_IO_BAR = 2, /* offset 0x18: IO space */
MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
COAL_CLOCKS_PER_USEC = 150, /* for calculating COAL_TIMEs */
MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */
MAX_COAL_IO_COUNT = 255, /* completed I/O count */
MV_PCI_REG_BASE = 0,
/*
* Per-chip ("all ports") interrupt coalescing feature.
* This is only for GEN_II / GEN_IIE hardware.
*
* Coalescing defers the interrupt until either the IO_THRESHOLD
* (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
*/
COAL_REG_BASE = 0x18000,
IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08),
ALL_PORTS_COAL_IRQ = (1 << 4), /* all ports irq event */
IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc),
IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),
/*
* Registers for the (unused here) transaction coalescing feature:
*/
TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88),
TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c),
SATAHC0_REG_BASE = 0x20000,
FLASH_CTL = 0x1046c,
GPIO_PORT_CTL = 0x104f0,
RESET_CFG = 0x180d8,
MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
MV_MAX_Q_DEPTH = 32,
MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
/* CRQB needs alignment on a 1KB boundary. Size == 1KB
* CRPB needs alignment on a 256B boundary. Size == 256B
* ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
*/
MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
MV_MAX_SG_CT = 256,
MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
MV_PORT_HC_SHIFT = 2,
MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */
/* Host Flags */
MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ |
ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,
MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN,
CRQB_FLAG_READ = (1 << 0),
CRQB_TAG_SHIFT = 1,
CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
CRQB_CMD_ADDR_SHIFT = 8,
CRQB_CMD_CS = (0x2 << 11),
CRQB_CMD_LAST = (1 << 15),
CRPB_FLAG_STATUS_SHIFT = 8,
CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
EPRD_FLAG_END_OF_TBL = (1 << 31),
/* PCI interface registers */
MV_PCI_COMMAND = 0xc00,
MV_PCI_COMMAND_MWRCOM = (1 << 4), /* PCI Master Write Combining */
MV_PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */
PCI_MAIN_CMD_STS = 0xd30,
STOP_PCI_MASTER = (1 << 2),
PCI_MASTER_EMPTY = (1 << 3),
GLOB_SFT_RST = (1 << 4),
MV_PCI_MODE = 0xd00,
MV_PCI_MODE_MASK = 0x30,
MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
MV_PCI_DISC_TIMER = 0xd04,
MV_PCI_MSI_TRIGGER = 0xc38,
MV_PCI_SERR_MASK = 0xc28,
MV_PCI_XBAR_TMOUT = 0x1d04,
MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
MV_PCI_ERR_ATTRIBUTE = 0x1d48,
MV_PCI_ERR_COMMAND = 0x1d50,
PCI_IRQ_CAUSE = 0x1d58,
PCI_IRQ_MASK = 0x1d5c,
PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
PCIE_IRQ_CAUSE = 0x1900,
PCIE_IRQ_MASK = 0x1910,
PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
PCI_HC_MAIN_IRQ_CAUSE = 0x1d60,
PCI_HC_MAIN_IRQ_MASK = 0x1d64,
SOC_HC_MAIN_IRQ_CAUSE = 0x20020,
SOC_HC_MAIN_IRQ_MASK = 0x20024,
ERR_IRQ = (1 << 0), /* shift by (2 * port #) */
DONE_IRQ = (1 << 1), /* shift by (2 * port #) */
HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
DONE_IRQ_0_3 = 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */
DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT), /* 4,5,6,7 */
PCI_ERR = (1 << 18),
TRAN_COAL_LO_DONE = (1 << 19), /* transaction coalescing */
TRAN_COAL_HI_DONE = (1 << 20), /* transaction coalescing */
PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */
PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */
ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */
GPIO_INT = (1 << 22),
SELF_INT = (1 << 23),
TWSI_INT = (1 << 24),
HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
/* SATAHC registers */
HC_CFG = 0x00,
HC_IRQ_CAUSE = 0x14,
DMA_IRQ = (1 << 0), /* shift by port # */
HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */
DEV_IRQ = (1 << 8), /* shift by port # */
/*
* Per-HC (Host-Controller) interrupt coalescing feature.
* This is present on all chip generations.
*
* Coalescing defers the interrupt until either the IO_THRESHOLD
* (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
*/
HC_IRQ_COAL_IO_THRESHOLD = 0x000c,
HC_IRQ_COAL_TIME_THRESHOLD = 0x0010,
SOC_LED_CTRL = 0x2c,
SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */
SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */
/* with dev activity LED */
/* Shadow block registers */
SHD_BLK = 0x100,
SHD_CTL_AST = 0x20, /* ofs from SHD_BLK */
/* SATA registers */
SATA_STATUS = 0x300, /* ctrl, err regs follow status */
SATA_ACTIVE = 0x350,
FIS_IRQ_CAUSE = 0x364,
FIS_IRQ_CAUSE_AN = (1 << 9), /* async notification */
LTMODE = 0x30c, /* requires read-after-write */
LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
PHY_MODE2 = 0x330,
PHY_MODE3 = 0x310,
PHY_MODE4 = 0x314, /* requires read-after-write */
PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */
PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */
PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */
PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */
SATA_IFCTL = 0x344,
SATA_TESTCTL = 0x348,
SATA_IFSTAT = 0x34c,
VENDOR_UNIQUE_FIS = 0x35c,
FISCFG = 0x360,
FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */
FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
PHY_MODE9_GEN2 = 0x398,
PHY_MODE9_GEN1 = 0x39c,
PHYCFG_OFS = 0x3a0, /* only in 65n devices */
MV5_PHY_MODE = 0x74,
MV5_LTMODE = 0x30,
MV5_PHY_CTL = 0x0C,
SATA_IFCFG = 0x050,
MV_M2_PREAMP_MASK = 0x7e0,
/* Port registers */
EDMA_CFG = 0,
EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
EDMA_ERR_IRQ_CAUSE = 0x8,
EDMA_ERR_IRQ_MASK = 0xc,
EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
EDMA_ERR_DEV = (1 << 2), /* device error */
EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
EDMA_ERR_OVERRUN_5 = (1 << 5),
EDMA_ERR_UNDERRUN_5 = (1 << 6),
EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
EDMA_ERR_LNK_CTRL_RX_1 |
EDMA_ERR_LNK_CTRL_RX_3 |
EDMA_ERR_LNK_CTRL_TX,
EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
EDMA_ERR_PRD_PAR |
EDMA_ERR_DEV_DCON |
EDMA_ERR_DEV_CON |
EDMA_ERR_SERR |
EDMA_ERR_SELF_DIS |
EDMA_ERR_CRQB_PAR |
EDMA_ERR_CRPB_PAR |
EDMA_ERR_INTRL_PAR |
EDMA_ERR_IORDY |
EDMA_ERR_LNK_CTRL_RX_2 |
EDMA_ERR_LNK_DATA_RX |
EDMA_ERR_LNK_DATA_TX |
EDMA_ERR_TRANS_PROTO,
EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
EDMA_ERR_PRD_PAR |
EDMA_ERR_DEV_DCON |
EDMA_ERR_DEV_CON |
EDMA_ERR_OVERRUN_5 |
EDMA_ERR_UNDERRUN_5 |
EDMA_ERR_SELF_DIS_5 |
EDMA_ERR_CRQB_PAR |
EDMA_ERR_CRPB_PAR |
EDMA_ERR_INTRL_PAR |
EDMA_ERR_IORDY,
EDMA_REQ_Q_BASE_HI = 0x10,
EDMA_REQ_Q_IN_PTR = 0x14, /* also contains BASE_LO */
EDMA_REQ_Q_OUT_PTR = 0x18,
EDMA_REQ_Q_PTR_SHIFT = 5,
EDMA_RSP_Q_BASE_HI = 0x1c,
EDMA_RSP_Q_IN_PTR = 0x20,
EDMA_RSP_Q_OUT_PTR = 0x24, /* also contains BASE_LO */
EDMA_RSP_Q_PTR_SHIFT = 3,
EDMA_CMD = 0x28, /* EDMA command register */
EDMA_EN = (1 << 0), /* enable EDMA */
EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */
EDMA_STATUS = 0x30, /* EDMA engine status */
EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */
EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */
EDMA_IORDY_TMOUT = 0x34,
EDMA_ARB_CFG = 0x38,
EDMA_HALTCOND = 0x60, /* GenIIe halt conditions */
EDMA_UNKNOWN_RSVD = 0x6C, /* GenIIe unknown/reserved */
BMDMA_CMD = 0x224, /* bmdma command register */
BMDMA_STATUS = 0x228, /* bmdma status register */
BMDMA_PRD_LOW = 0x22c, /* bmdma PRD addr 31:0 */
BMDMA_PRD_HIGH = 0x230, /* bmdma PRD addr 63:32 */
/* Host private flags (hp_flags) */
MV_HP_FLAG_MSI = (1 << 0),
MV_HP_ERRATA_50XXB0 = (1 << 1),
MV_HP_ERRATA_50XXB2 = (1 << 2),
MV_HP_ERRATA_60X1B2 = (1 << 3),
MV_HP_ERRATA_60X1C0 = (1 << 4),
MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */
MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */
MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */
/* Port private flags (pp_flags) */
MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */
MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */
MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4), /* ignore initial ATA_DRDY */
};
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
#define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
#define WINDOW_BASE(i) (0x20034 + ((i) << 4))
enum {
/* DMA boundary 0xffff is required by the s/g splitting
* we need on /length/ in mv_fill_sg().
*/
MV_DMA_BOUNDARY = 0xffffU,
/* mask of register bits containing lower 32 bits
* of EDMA request queue DMA address
*/
EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
/* ditto, for response queue */
EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};
enum chip_type {
chip_504x,
chip_508x,
chip_5080,
chip_604x,
chip_608x,
chip_6042,
chip_7042,
chip_soc,
};
/* Command ReQuest Block: 32B */
struct mv_crqb {
__le32 sg_addr;
__le32 sg_addr_hi;
__le16 ctrl_flags;
__le16 ata_cmd[11];
};
struct mv_crqb_iie {
__le32 addr;
__le32 addr_hi;
__le32 flags;
__le32 len;
__le32 ata_cmd[4];
};
/* Command ResPonse Block: 8B */
struct mv_crpb {
__le16 id;
__le16 flags;
__le32 tmstmp;
};
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
__le32 addr;
__le32 flags_size;
__le32 addr_hi;
__le32 reserved;
};
/*
* We keep a local cache of a few frequently accessed port
* registers here, to avoid having to read them (very slow)
* when switching between EDMA and non-EDMA modes.
*/
struct mv_cached_regs {
u32 fiscfg;
u32 ltmode;
u32 haltcond;
u32 unknown_rsvd;
};
struct mv_port_priv {
struct mv_crqb *crqb;
dma_addr_t crqb_dma;
struct mv_crpb *crpb;
dma_addr_t crpb_dma;
struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
unsigned int req_idx;
unsigned int resp_idx;
u32 pp_flags;
struct mv_cached_regs cached;
unsigned int delayed_eh_pmp_map;
};
struct mv_port_signal {
u32 amps;
u32 pre;
};
struct mv_host_priv {
u32 hp_flags;
unsigned int board_idx;
u32 main_irq_mask;
struct mv_port_signal signal[8];
const struct mv_hw_ops *ops;
int n_ports;
void __iomem *base;
void __iomem *main_irq_cause_addr;
void __iomem *main_irq_mask_addr;
u32 irq_cause_offset;
u32 irq_mask_offset;
u32 unmask_all_irqs;
#if defined(CONFIG_HAVE_CLK)
struct clk *clk;
#endif
/*
* These consistent DMA memory pools give us guaranteed
* alignment for hardware-accessed data structures,
* and less memory waste in accomplishing the alignment.
*/
struct dma_pool *crqb_pool;
struct dma_pool *crpb_pool;
struct dma_pool *sg_tbl_pool;
};
struct mv_hw_ops {
void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port);
void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int n_hc);
void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
void __iomem *mmio, unsigned int port);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
struct mv_port_priv *pp);
static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8 mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
* because we have to allow room for worst case splitting of
* PRDs for 64K boundaries in mv_fill_sg().
*/
static struct scsi_host_template mv5_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = MV_MAX_SG_CT / 2,
.dma_boundary = MV_DMA_BOUNDARY,
};
static struct scsi_host_template mv6_sht = {
ATA_NCQ_SHT(DRV_NAME),
.can_queue = MV_MAX_Q_DEPTH - 1,
.sg_tablesize = MV_MAX_SG_CT / 2,
.dma_boundary = MV_DMA_BOUNDARY,
};
static struct ata_port_operations mv5_ops = {
.inherits = &ata_sff_port_ops,
.lost_interrupt = ATA_OP_NULL,
.qc_defer = mv_qc_defer,
.qc_prep = mv_qc_prep,
.qc_issue = mv_qc_issue,
.freeze = mv_eh_freeze,
.thaw = mv_eh_thaw,
.hardreset = mv_hardreset,
.scr_read = mv5_scr_read,
.scr_write = mv5_scr_write,
.port_start = mv_port_start,
.port_stop = mv_port_stop,
};
static struct ata_port_operations mv6_ops = {
.inherits = &ata_bmdma_port_ops,
.lost_interrupt = ATA_OP_NULL,
.qc_defer = mv_qc_defer,
.qc_prep = mv_qc_prep,
.qc_issue = mv_qc_issue,
.dev_config = mv6_dev_config,
.freeze = mv_eh_freeze,
.thaw = mv_eh_thaw,
.hardreset = mv_hardreset,
.softreset = mv_softreset,
.pmp_hardreset = mv_pmp_hardreset,
.pmp_softreset = mv_softreset,
.error_handler = mv_pmp_error_handler,
.scr_read = mv_scr_read,
.scr_write = mv_scr_write,
.sff_check_status = mv_sff_check_status,
.sff_irq_clear = mv_sff_irq_clear,
.check_atapi_dma = mv_check_atapi_dma,
.bmdma_setup = mv_bmdma_setup,
.bmdma_start = mv_bmdma_start,
.bmdma_stop = mv_bmdma_stop,
.bmdma_status = mv_bmdma_status,
.port_start = mv_port_start,
.port_stop = mv_port_stop,
};
static struct ata_port_operations mv_iie_ops = {
.inherits = &mv6_ops,
.dev_config = ATA_OP_NULL,
.qc_prep = mv_qc_prep_iie,
};
static const struct ata_port_info mv_port_info[] = {
{ /* chip_504x */
.flags = MV_GEN_I_FLAGS,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv5_ops,
},
{ /* chip_508x */
.flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv5_ops,
},
{ /* chip_5080 */
.flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv5_ops,
},
{ /* chip_604x */
.flags = MV_GEN_II_FLAGS,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv6_ops,
},
{ /* chip_608x */
.flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv6_ops,
},
{ /* chip_6042 */
.flags = MV_GEN_IIE_FLAGS,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv_iie_ops,
},
{ /* chip_7042 */
.flags = MV_GEN_IIE_FLAGS,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv_iie_ops,
},
{ /* chip_soc */
.flags = MV_GEN_IIE_FLAGS,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv_iie_ops,
},
};
static const struct pci_device_id mv_pci_tbl[] = {
{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
/* RocketRAID 1720/174x have different identifiers */
{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },
{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
/* Adaptec 1430SA */
{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
/* Marvell 7042 support */
{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
/* Highpoint RocketRAID PCIe series */
{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
{ } /* terminate list */
};
static const struct mv_hw_ops mv5xxx_ops = {
.phy_errata = mv5_phy_errata,
.enable_leds = mv5_enable_leds,
.read_preamp = mv5_read_preamp,
.reset_hc = mv5_reset_hc,
.reset_flash = mv5_reset_flash,
.reset_bus = mv5_reset_bus,
};
static const struct mv_hw_ops mv6xxx_ops = {
.phy_errata = mv6_phy_errata,
.enable_leds = mv6_enable_leds,
.read_preamp = mv6_read_preamp,
.reset_hc = mv6_reset_hc,
.reset_flash = mv6_reset_flash,
.reset_bus = mv_reset_pci_bus,
};
static const struct mv_hw_ops mv_soc_ops = {
.phy_errata = mv6_phy_errata,
.enable_leds = mv_soc_enable_leds,
.read_preamp = mv_soc_read_preamp,
.reset_hc = mv_soc_reset_hc,
.reset_flash = mv_soc_reset_flash,
.reset_bus = mv_soc_reset_bus,
};
static const struct mv_hw_ops mv_soc_65n_ops = {
.phy_errata = mv_soc_65n_phy_errata,
.enable_leds = mv_soc_enable_leds,
.reset_hc = mv_soc_reset_hc,
.reset_flash = mv_soc_reset_flash,
.reset_bus = mv_soc_reset_bus,
};
/*
* Functions
*/
static inline void writelfl(unsigned long data, void __iomem *addr)
{
writel(data, addr);
(void) readl(addr); /* flush to avoid PCI posted write */
}
static inline unsigned int mv_hc_from_port(unsigned int port)
{
return port >> MV_PORT_HC_SHIFT;
}
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
return port & MV_PORT_MASK;
}
/*
* Consolidate some rather tricky bit shift calculations.
* This is hot-path stuff, so not a function.
* Simple code, with two return values, so macro rather than inline.
*
* port is the sole input, in range 0..7.
* shift is one output, for use with main_irq_cause / main_irq_mask registers.
* hardport is the other output, in range 0..3.
*
* Note that port and hardport may be the same variable in some cases.
*/
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \
{ \
shift = mv_hc_from_port(port) * HC_SHIFT; \
hardport = mv_hardport_from_port(port); \
shift += hardport * 2; \
}
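/*
 * Worked example (hypothetical port number): for port 6,
 *
 * unsigned int shift, hardport, port = 6;
 * MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
 *
 * computes hc = 6 >> MV_PORT_HC_SHIFT = 1, so shift starts at
 * 1 * HC_SHIFT = 9; hardport = 6 & MV_PORT_MASK = 2, giving a final
 * shift of 9 + 2 * 2 = 13: the position of port 6's ERR_IRQ bit in
 * main_irq_cause/main_irq_mask (its DONE_IRQ bit is at shift + 1).
 */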
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
unsigned int port)
{
return mv_hc_base(base, mv_hc_from_port(port));
}
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
return mv_hc_base_from_port(base, port) +
MV_SATAHC_ARBTR_REG_SZ +
(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
return hc_mmio + ofs;
}
static inline void __iomem *mv_host_base(struct ata_host *host)
{
struct mv_host_priv *hpriv = host->private_data;
return hpriv->base;
}
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
return mv_port_base(mv_host_base(ap->host), ap->port_no);
}
static inline int mv_get_hc_count(unsigned long port_flags)
{
return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
/**
* mv_save_cached_regs - (re-)initialize cached port registers
* @ap: the port whose registers we are caching
*
* Initialize the local cache of port registers,
* so that reading them over and over again can
* be avoided on the hotter paths of this driver.
* This saves a few microseconds each time we switch
* to/from EDMA mode to perform (eg.) a drive cache flush.
*/
static void mv_save_cached_regs(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_port_priv *pp = ap->private_data;
pp->cached.fiscfg = readl(port_mmio + FISCFG);
pp->cached.ltmode = readl(port_mmio + LTMODE);
pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
}
/**
* mv_write_cached_reg - write to a cached port register
* @addr: hardware address of the register
* @old: pointer to cached value of the register
* @new: new value for the register
*
* Write a new value to a cached register,
* but only if the value is different from before.
*/
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
if (new != *old) {
unsigned long laddr;
*old = new;
/*
* Workaround for 88SX60x1-B2 FEr SATA#13:
* Read-after-write is needed to prevent generating 64-bit
* write cycles on the PCI bus for SATA interface registers
* at offsets ending in 0x4 or 0xc.
*
* Looks like a lot of fuss, but it avoids an unnecessary
* +1 usec read-after-write delay for unaffected registers.
*/
laddr = (long)addr & 0xffff;
if (laddr >= 0x300 && laddr <= 0x33c) {
laddr &= 0x000f;
if (laddr == 0x4 || laddr == 0xc) {
writelfl(new, addr); /* read after write */
return;
}
}
writel(new, addr); /* unaffected by the errata */
}
}
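/*
 * Worked example (illustrative addresses): a SATA interface register
 * whose low 16 address bits land in 0x300..0x33c and end in 0x4 or
 * 0xc (e.g. ...0x304, ...0x30c) takes the writelfl() path above and
 * pays for the extra read; a register such as FISCFG (0x360) falls
 * outside that range and gets a plain writel() with no
 * read-after-write delay.
 */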
static void mv_set_edma_ptrs(void __iomem *port_mmio,
struct mv_host_priv *hpriv,
struct mv_port_priv *pp)
{
u32 index;
/*
* initialize request queue
*/
pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
WARN_ON(pp->crqb_dma & 0x3ff);
writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
port_mmio + EDMA_REQ_Q_IN_PTR);
writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
/*
* initialize response queue
*/
pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
WARN_ON(pp->crpb_dma & 0xff);
writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
port_mmio + EDMA_RSP_Q_OUT_PTR);
}
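/*
 * Worked example (hypothetical index): with pp->req_idx == 3, the code
 * above writes index = 3 << EDMA_REQ_Q_PTR_SHIFT = 0x60 to the IN_PTR
 * register. Since crqb_dma is 1KB-aligned (the WARN_ON checks that its
 * low 10 bits are zero) and the 5-bit-shifted index never exceeds
 * 31 << 5 = 0x3e0, OR-ing the two fields cannot corrupt BASE_LO.
 */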
static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
/*
* When writing to the main_irq_mask in hardware,
* we must ensure exclusivity between the interrupt coalescing bits
* and the corresponding individual port DONE_IRQ bits.
*
* Note that this register is really an "IRQ enable" register,
* not an "IRQ mask" register as Marvell's naming might suggest.
*/
if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
mask &= ~DONE_IRQ_0_3;
if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
mask &= ~DONE_IRQ_4_7;
writelfl(mask, hpriv->main_irq_mask_addr);
}
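/*
 * Worked example: enabling ALL_PORTS_COAL_DONE (bit 21) in the mask
 * forces off DONE_IRQ_0_3 (0x000000aa) and DONE_IRQ_4_7 (0xaa << 9),
 * so a completed I/O raises only the coalescing interrupt rather than
 * a duplicate per-port DONE interrupt.
 */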
static void mv_set_main_irq_mask(struct ata_host *host,
u32 disable_bits, u32 enable_bits)
{
struct mv_host_priv *hpriv = host->private_data;
u32 old_mask, new_mask;
old_mask = hpriv->main_irq_mask;
new_mask = (old_mask & ~disable_bits) | enable_bits;
if (new_mask != old_mask) {
hpriv->main_irq_mask = new_mask;
mv_write_main_irq_mask(new_mask, hpriv);
}
}
static void mv_enable_port_irqs(struct ata_port *ap,
unsigned int port_bits)
{
unsigned int shift, hardport, port = ap->port_no;
u32 disable_bits, enable_bits;
MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
enable_bits = port_bits << shift;
mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}
static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
void __iomem *port_mmio,
unsigned int port_irqs)
{
struct mv_host_priv *hpriv = ap->host->private_data;
int hardport = mv_hardport_from_port(ap->port_no);
void __iomem *hc_mmio = mv_hc_base_from_port(
mv_host_base(ap->host), ap->port_no);
u32 hc_irq_cause;
/* clear EDMA event indicators, if any */
writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
/* clear pending irq events */
hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
/* clear FIS IRQ Cause */
if (IS_GEN_IIE(hpriv))
writelfl(0, port_mmio + FIS_IRQ_CAUSE);
mv_enable_port_irqs(ap, port_irqs);
}
static void mv_set_irq_coalescing(struct ata_host *host,
unsigned int count, unsigned int usecs)
{
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base, *hc_mmio;
u32 coal_enable = 0;
unsigned long flags;
unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
ALL_PORTS_COAL_DONE;
/* Disable IRQ coalescing if either threshold is zero */
if (!usecs || !count) {
clks = count = 0;
} else {
/* Respect maximum limits of the hardware */
clks = usecs * COAL_CLOCKS_PER_USEC;
if (clks > MAX_COAL_TIME_THRESHOLD)
clks = MAX_COAL_TIME_THRESHOLD;
if (count > MAX_COAL_IO_COUNT)
count = MAX_COAL_IO_COUNT;
}
spin_lock_irqsave(&host->lock, flags);
mv_set_main_irq_mask(host, coal_disable, 0);
if (is_dual_hc && !IS_GEN_I(hpriv)) {
/*
* GEN_II/GEN_IIE with dual host controllers:
* one set of global thresholds for the entire chip.
*/
writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD);
writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
/* clear leftover coal IRQ bit */
writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
if (count)
coal_enable = ALL_PORTS_COAL_DONE;
clks = count = 0; /* force clearing of regular regs below */
}
/*
* All chips: independent thresholds for each HC on the chip.
*/
hc_mmio = mv_hc_base_from_port(mmio, 0);
writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
if (count)
coal_enable |= PORTS_0_3_COAL_DONE;
if (is_dual_hc) {
hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
if (count)
coal_enable |= PORTS_4_7_COAL_DONE;
}
mv_set_main_irq_mask(host, 0, coal_enable);
spin_unlock_irqrestore(&host->lock, flags);
}
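/*
 * Worked example (hypothetical thresholds): mv_set_irq_coalescing(host,
 * 4, 100) converts 100 usecs into 100 * COAL_CLOCKS_PER_USEC = 15000
 * internal clocks (well under MAX_COAL_TIME_THRESHOLD) and keeps the
 * I/O count at 4 (max 255). Passing 0 for either argument disables
 * coalescing entirely.
 */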
/**
* mv_start_edma - Enable eDMA engine
* @ap: ATA channel to manipulate
* @port_mmio: port base address
* @pp: port private data
* @protocol: taskfile protocol of the command being started
*
* Verify the local cache of the eDMA state is accurate with a
* WARN_ON.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
struct mv_port_priv *pp, u8 protocol)
{
int want_ncq = (protocol == ATA_PROT_NCQ);
if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
if (want_ncq != using_ncq)
mv_stop_edma(ap);
}
if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
struct mv_host_priv *hpriv = ap->host->private_data;
mv_edma_cfg(ap, want_ncq, 1);
mv_set_edma_ptrs(port_mmio, hpriv, pp);
mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
writelfl(EDMA_EN, port_mmio + EDMA_CMD);
pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
}
}
static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
const int per_loop = 5, timeout = (15 * 1000 / per_loop);
int i;
/*
* Wait for the EDMA engine to finish transactions in progress.
* No idea what a good "timeout" value might be, but measurements
* indicate that it often requires hundreds of microseconds
* with two drives in-use. So we use the 15msec value above
* as a rough guess at what even more drives might require.
*/
for (i = 0; i < timeout; ++i) {
u32 edma_stat = readl(port_mmio + EDMA_STATUS);
if ((edma_stat & empty_idle) == empty_idle)
break;
udelay(per_loop);
}
/* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
}
/**
* mv_stop_edma_engine - Disable eDMA engine
* @port_mmio: io base address
*
* LOCKING:
* Inherited from caller.
*/
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
int i;
/* Disable eDMA. The disable bit auto clears. */
writelfl(EDMA_DS, port_mmio + EDMA_CMD);
/* Wait for the chip to confirm eDMA is off. */
for (i = 10000; i > 0; i--) {
u32 reg = readl(port_mmio + EDMA_CMD);
if (!(reg & EDMA_EN))
return 0;
udelay(10);
}
return -EIO;
}
static int mv_stop_edma(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_port_priv *pp = ap->private_data;
int err = 0;
if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
return 0;
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
mv_wait_for_edma_empty_idle(ap);
if (mv_stop_edma_engine(port_mmio)) {
ata_port_err(ap, "Unable to stop eDMA\n");
err = -EIO;
}
mv_edma_cfg(ap, 0, 0);
return err;
}
#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
int b, w;
for (b = 0; b < bytes; ) {
DPRINTK("%p: ", start + b);
for (w = 0; b < bytes && w < 4; w++) {
printk("%08x ", readl(start + b));
b += sizeof(u32);
}
printk("\n");
}
}
#endif
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
int b, w;
u32 dw;
for (b = 0; b < bytes; ) {
DPRINTK("%02x: ", b);
for (w = 0; b < bytes && w < 4; w++) {
(void) pci_read_config_dword(pdev, b, &dw);
printk("%08x ", dw);
b += sizeof(u32);
}
printk("\n");
}
#endif
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
void __iomem *hc_base = mv_hc_base(mmio_base,
port >> MV_PORT_HC_SHIFT);
void __iomem *port_base;
int start_port, num_ports, p, start_hc, num_hcs, hc;
if (port < 0) {
start_hc = start_port = 0;
num_ports = 8; /* should be benign for 4 port devs */
num_hcs = 2;
} else {
start_hc = port >> MV_PORT_HC_SHIFT;
start_port = port;
num_ports = num_hcs = 1;
}
DPRINTK("All registers for port(s) %u-%u:\n", start_port,
num_ports > 1 ? num_ports - 1 : start_port);
if (pdev) {
DPRINTK("PCI config space regs:\n");
mv_dump_pci_cfg(pdev, 0x68);
}
DPRINTK("PCI regs:\n");
mv_dump_mem(mmio_base+0xc00, 0x3c);
mv_dump_mem(mmio_base+0xd00, 0x34);
mv_dump_mem(mmio_base+0xf00, 0x4);
mv_dump_mem(mmio_base+0x1d00, 0x6c);
for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
hc_base = mv_hc_base(mmio_base, hc);
DPRINTK("HC regs (HC %i):\n", hc);
mv_dump_mem(hc_base, 0x1c);
}
for (p = start_port; p < start_port + num_ports; p++) {
port_base = mv_port_base(mmio_base, p);
DPRINTK("EDMA regs (port %i):\n", p);
mv_dump_mem(port_base, 0x54);
DPRINTK("SATA regs (port %i):\n", p);
mv_dump_mem(port_base+0x300, 0x60);
}
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
unsigned int ofs;
switch (sc_reg_in) {
case SCR_STATUS:
case SCR_CONTROL:
case SCR_ERROR:
ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
break;
case SCR_ACTIVE:
ofs = SATA_ACTIVE; /* active is not with the others */
break;
default:
ofs = 0xffffffffU;
break;
}
return ofs;
}
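/*
 * Worked example: with the generic SCR indices SCR_STATUS = 0,
 * SCR_ERROR = 1 and SCR_CONTROL = 2, mv_scr_offset(SCR_ERROR) returns
 * SATA_STATUS + 1 * sizeof(u32) = 0x304, matching the "ctrl, err regs
 * follow status" layout noted in the port register enum above.
 */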
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
unsigned int ofs = mv_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU) {
*val = readl(mv_ap_base(link->ap) + ofs);
return 0;
} else
return -EINVAL;
}
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
unsigned int ofs = mv_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU) {
void __iomem *addr = mv_ap_base(link->ap) + ofs;
if (sc_reg_in == SCR_CONTROL) {
/*
* Workaround for 88SX60x1 FEr SATA#26:
*
* COMRESETs have to take care not to accidentally
* put the drive to sleep when writing SCR_CONTROL.
* Setting bits 12..15 prevents this problem.
*
* So if we see an outbound COMRESET, set those bits.
* Ditto for the followup write that clears the reset.
*
* The proprietary driver does this for
* all chip versions, and so do we.
*/
if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
val |= 0xf000;
}
writelfl(val, addr);
return 0;
} else
return -EINVAL;
}
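/*
 * Worked example: a COMRESET writes 0x301 (DET = 1) to SCR_CONTROL,
 * which the workaround turns into 0xf301. The follow-up write of
 * 0x300 that releases the reset likewise becomes 0xf300, because the
 * readback of the register still shows DET == 1 at that point.
 */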
static void mv6_dev_config(struct ata_device *adev)
{
/*
* Deal with Gen-II ("mv6") hardware quirks/restrictions:
*
* Gen-II does not support NCQ over a port multiplier
* (no FIS-based switching).
*/
if (adev->flags & ATA_DFLAG_NCQ) {
if (sata_pmp_attached(adev->link->ap)) {
adev->flags &= ~ATA_DFLAG_NCQ;
ata_dev_info(adev,
"NCQ disabled for command-based switching\n");
}
}
}
static int mv_qc_defer(struct ata_queued_cmd *qc)
{
struct ata_link *link = qc->dev->link;
struct ata_port *ap = link->ap;
struct mv_port_priv *pp = ap->private_data;
/*
* Don't allow new commands if we're in a delayed EH state
* for NCQ and/or FIS-based switching.
*/
if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
return ATA_DEFER_PORT;
/* PIO commands need exclusive link: no other commands [DMA or PIO]
* can run concurrently.
* We set excl_link when we want to send a PIO command in DMA mode
* or a non-NCQ command in NCQ mode.
* When we receive a command from that link, and there are no
* outstanding commands, mark a flag to clear excl_link and let
* the command go through.
*/
if (unlikely(ap->excl_link)) {
if (link == ap->excl_link) {
if (ap->nr_active_links)
return ATA_DEFER_PORT;
qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
return 0;
} else
return ATA_DEFER_PORT;
}
/*
* If the port is completely idle, then allow the new qc.
*/
if (ap->nr_active_links == 0)
return 0;
/*
* The port is operating in host queuing mode (EDMA) with NCQ
* enabled, allow multiple NCQ commands. EDMA also allows
* queueing multiple DMA commands but libata core currently
* doesn't allow it.
*/
if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
if (ata_is_ncq(qc->tf.protocol))
return 0;
else {
ap->excl_link = link;
return ATA_DEFER_PORT;
}
}
return ATA_DEFER_PORT;
}
static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
{
struct mv_port_priv *pp = ap->private_data;
void __iomem *port_mmio;
u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg;
u32 ltmode, *old_ltmode = &pp->cached.ltmode;
u32 haltcond, *old_haltcond = &pp->cached.haltcond;
ltmode = *old_ltmode & ~LTMODE_BIT8;
haltcond = *old_haltcond | EDMA_ERR_DEV;
if (want_fbs) {
fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
ltmode = *old_ltmode | LTMODE_BIT8;
if (want_ncq)
haltcond &= ~EDMA_ERR_DEV;
else
fiscfg |= FISCFG_WAIT_DEV_ERR;
} else {
fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
}
port_mmio = mv_ap_base(ap);
mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
}
static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
struct mv_host_priv *hpriv = ap->host->private_data;
u32 old, new;
/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
old = readl(hpriv->base + GPIO_PORT_CTL);
if (want_ncq)
new = old | (1 << 22);
else
new = old & ~(1 << 22);
if (new != old)
writel(new, hpriv->base + GPIO_PORT_CTL);
}
/**
* mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
* @ap: Port being initialized
* @enable_bmdma: non-zero enables basic DMA, zero disables it
*
* There are two DMA modes on these chips: basic DMA, and EDMA.
*
* Bit-0 of the "EDMA RESERVED" register enables/disables use
* of basic DMA on the GEN_IIE versions of the chips.
*
* This bit survives EDMA resets, and must be set for basic DMA
* to function, and should be cleared when EDMA is active.
*/
static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
{
struct mv_port_priv *pp = ap->private_data;
u32 new, *old = &pp->cached.unknown_rsvd;
if (enable_bmdma)
new = *old | 1;
else
new = *old & ~1;
mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
}
/*
* SOC chips have an issue whereby the HDD LEDs don't always blink
* during I/O when NCQ is enabled. Enabling a special "LED blink" mode
* of the SOC takes care of it, generating a steady blink rate when
* any drive on the chip is active.
*
* Unfortunately, the blink mode is a global hardware setting for the SOC,
* so we must use it whenever at least one port on the SOC has NCQ enabled.
*
* We turn "LED blink" off when NCQ is not in use anywhere, because the normal
* LED operation works then, and provides better (more accurate) feedback.
*
* Note that this code assumes that an SOC never has more than one HC onboard.
*/
static void mv_soc_led_blink_enable(struct ata_port *ap)
{
struct ata_host *host = ap->host;
struct mv_host_priv *hpriv = host->private_data;
void __iomem *hc_mmio;
u32 led_ctrl;
if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
return;
hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}
static void mv_soc_led_blink_disable(struct ata_port *ap)
{
struct ata_host *host = ap->host;
struct mv_host_priv *hpriv = host->private_data;
void __iomem *hc_mmio;
u32 led_ctrl;
unsigned int port;
if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
return;
/* disable led-blink only if no ports are using NCQ */
for (port = 0; port < hpriv->n_ports; port++) {
struct ata_port *this_ap = host->ports[port];
struct mv_port_priv *pp = this_ap->private_data;
if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
return;
}
hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
{
u32 cfg;
struct mv_port_priv *pp = ap->private_data;
struct mv_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = mv_ap_base(ap);
/* set up non-NCQ EDMA configuration */
cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
pp->pp_flags &=
~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
if (IS_GEN_I(hpriv))
cfg |= (1 << 8); /* enab config burst size mask */
else if (IS_GEN_II(hpriv)) {
cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
mv_60x1_errata_sata25(ap, want_ncq);
} else if (IS_GEN_IIE(hpriv)) {
int want_fbs = sata_pmp_attached(ap);
/*
* Possible future enhancement:
*
* The chip can use FBS with non-NCQ, if we allow it,
* But first we need to have the error handling in place
* for this mode (datasheet section 7.3.15.4.2.3).
* So disallow non-NCQ FBS for now.
*/
want_fbs &= want_ncq;
mv_config_fbs(ap, want_ncq, want_fbs);
if (want_fbs) {
pp->pp_flags |= MV_PP_FLAG_FBS_EN;
cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
}
cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
if (want_edma) {
cfg |= (1 << 22); /* enab 4-entry host queue cache */
if (!IS_SOC(hpriv))
cfg |= (1 << 18); /* enab early completion */
}
if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
mv_bmdma_enable_iie(ap, !want_edma);
if (IS_SOC(hpriv)) {
if (want_ncq)
mv_soc_led_blink_enable(ap);
else
mv_soc_led_blink_disable(ap);
}
}
if (want_ncq) {
cfg |= EDMA_CFG_NCQ;
pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
}
writelfl(cfg, port_mmio + EDMA_CFG);
}
static void mv_port_free_dma_mem(struct ata_port *ap)
{
struct mv_host_priv *hpriv = ap->host->private_data;
struct mv_port_priv *pp = ap->private_data;
int tag;
if (pp->crqb) {
dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
pp->crqb = NULL;
}
if (pp->crpb) {
dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
pp->crpb = NULL;
}
/*
* For GEN_I, there's no NCQ, so we have only a single sg_tbl.
* For later hardware, we have one unique sg_tbl per NCQ tag.
*/
for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
if (pp->sg_tbl[tag]) {
if (tag == 0 || !IS_GEN_I(hpriv))
dma_pool_free(hpriv->sg_tbl_pool,
pp->sg_tbl[tag],
pp->sg_tbl_dma[tag]);
pp->sg_tbl[tag] = NULL;
}
}
}
/**
* mv_port_start - Port specific init/start routine.
* @ap: ATA channel to manipulate
*
* Allocate and point to DMA memory, init port private memory,
* zero indices.
*
* LOCKING:
* Inherited from caller.
*/
static int mv_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct mv_host_priv *hpriv = ap->host->private_data;
struct mv_port_priv *pp;
unsigned long flags;
int tag;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
ap->private_data = pp;
pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
if (!pp->crqb)
return -ENOMEM;
memset(pp->crqb, 0, MV_CRQB_Q_SZ);
pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
if (!pp->crpb)
goto out_port_free_dma_mem;
memset(pp->crpb, 0, MV_CRPB_Q_SZ);
/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
ap->flags |= ATA_FLAG_AN;
/*
* For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
* For later hardware, we need one unique sg_tbl per NCQ tag.
*/
for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
if (tag == 0 || !IS_GEN_I(hpriv)) {
pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
GFP_KERNEL, &pp->sg_tbl_dma[tag]);
if (!pp->sg_tbl[tag])
goto out_port_free_dma_mem;
} else {
pp->sg_tbl[tag] = pp->sg_tbl[0];
pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
}
}
spin_lock_irqsave(ap->lock, flags);
mv_save_cached_regs(ap);
mv_edma_cfg(ap, 0, 0);
spin_unlock_irqrestore(ap->lock, flags);
return 0;
out_port_free_dma_mem:
mv_port_free_dma_mem(ap);
return -ENOMEM;
}
/**
* mv_port_stop - Port specific cleanup/stop routine.
* @ap: ATA channel to manipulate
*
* Stop DMA, cleanup port memory.
*
* LOCKING:
* This routine uses the host lock to protect the DMA stop.
*/
static void mv_port_stop(struct ata_port *ap)
{
unsigned long flags;
spin_lock_irqsave(ap->lock, flags);
mv_stop_edma(ap);
mv_enable_port_irqs(ap, 0);
spin_unlock_irqrestore(ap->lock, flags);
mv_port_free_dma_mem(ap);
}
/**
* mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
* @qc: queued command whose SG list to source from
*
* Populate the SG list and mark the last entry.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
struct mv_port_priv *pp = qc->ap->private_data;
struct scatterlist *sg;
struct mv_sg *mv_sg, *last_sg = NULL;
unsigned int si;
mv_sg = pp->sg_tbl[qc->tag];
for_each_sg(qc->sg, sg, qc->n_elem, si) {
dma_addr_t addr = sg_dma_address(sg);
u32 sg_len = sg_dma_len(sg);
while (sg_len) {
u32 offset = addr & 0xffff;
u32 len = sg_len;
if (offset + len > 0x10000)
len = 0x10000 - offset;
mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
mv_sg->flags_size = cpu_to_le32(len & 0xffff);
mv_sg->reserved = 0;
sg_len -= len;
addr += len;
last_sg = mv_sg;
mv_sg++;
}
}
if (likely(last_sg))
last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
mb(); /* ensure data structure is visible to the chipset */
}
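/*
 * Worked example (hypothetical segment): an sg entry with
 * addr = 0x1fff0 and sg_len = 0x20 straddles a 64KB boundary
 * (offset = 0xfff0, so offset + len > 0x10000), and the inner loop
 * emits two ePRDs of 0x10 bytes each. This worst-case doubling is
 * why .sg_tablesize is MV_MAX_SG_CT / 2 in the templates above.
 */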
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
(last ? CRQB_CMD_LAST : 0);
*cmdw = cpu_to_le16(tmp);
}
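/*
 * Worked example (assuming ATA_REG_CMD == 7, as in <linux/ata.h>):
 * packing WRITE DMA (0xca) as the final command word,
 *
 * mv_crqb_pack_cmd(cw, 0xca, ATA_REG_CMD, 1);
 *
 * stores 0xca | (7 << 8) | CRQB_CMD_CS | CRQB_CMD_LAST = 0x97ca.
 */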
/**
* mv_sff_irq_clear - Clear hardware interrupt after DMA.
* @ap: Port associated with this ATA transaction.
*
* We need this only for ATAPI bmdma transactions,
* as otherwise we experience spurious interrupts
* after libata-sff handles the bmdma interrupts.
*/
static void mv_sff_irq_clear(struct ata_port *ap)
{
mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
}
/**
* mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
* @qc: queued command to check for chipset/DMA compatibility.
*
* The bmdma engines cannot handle speculative data sizes
* (bytecount under/over flow). So only allow DMA for
* data transfer commands with known data sizes.
*
* LOCKING:
* Inherited from caller.
*/
static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
if (scmd) {
switch (scmd->cmnd[0]) {
case READ_6:
case READ_10:
case READ_12:
case WRITE_6:
case WRITE_10:
case WRITE_12:
case GPCMD_READ_CD:
case GPCMD_SEND_DVD_STRUCTURE:
case GPCMD_SEND_CUE_SHEET:
return 0; /* DMA is safe */
}
}
return -EOPNOTSUPP; /* use PIO instead */
}
/**
* mv_bmdma_setup - Set up BMDMA transaction
* @qc: queued command to prepare DMA for.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_bmdma_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_port_priv *pp = ap->private_data;
mv_fill_sg(qc);
/* clear all DMA cmd bits */
writel(0, port_mmio + BMDMA_CMD);
/* load PRD table addr. */
writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
port_mmio + BMDMA_PRD_HIGH);
writelfl(pp->sg_tbl_dma[qc->tag],
port_mmio + BMDMA_PRD_LOW);
/* issue r/w command */
ap->ops->sff_exec_command(ap, &qc->tf);
}
/**
* mv_bmdma_start - Start a BMDMA transaction
* @qc: queued command to start DMA on.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *port_mmio = mv_ap_base(ap);
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
/* start host DMA transaction */
writelfl(cmd, port_mmio + BMDMA_CMD);
}
/**
* mv_bmdma_stop_ap - Stop BMDMA transfer
* @ap: port on which to stop DMA
*
* Clears the ATA_DMA_START flag in the bmdma control register
*
* LOCKING:
* Inherited from caller.
*/
static void mv_bmdma_stop_ap(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
u32 cmd;
/* clear start/stop bit */
cmd = readl(port_mmio + BMDMA_CMD);
if (cmd & ATA_DMA_START) {
cmd &= ~ATA_DMA_START;
writelfl(cmd, port_mmio + BMDMA_CMD);
/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
ata_sff_dma_pause(ap);
}
}
static void mv_bmdma_stop(struct ata_queued_cmd *qc)
{
mv_bmdma_stop_ap(qc->ap);
}
/**
* mv_bmdma_status - Read BMDMA status
* @ap: port for which to retrieve DMA status.
*
* Read and return equivalent of the sff BMDMA status register.
*
* LOCKING:
* Inherited from caller.
*/
static u8 mv_bmdma_status(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
u32 reg, status;
/*
* Other bits are valid only if ATA_DMA_ACTIVE==0,
* and the ATA_DMA_INTR bit doesn't exist.
*/
reg = readl(port_mmio + BMDMA_STATUS);
if (reg & ATA_DMA_ACTIVE)
status = ATA_DMA_ACTIVE;
else if (reg & ATA_DMA_ERR)
status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
else {
/*
* Just because DMA_ACTIVE is 0 (DMA completed),
* this does _not_ mean the device is "done".
* So we should not yet be signalling ATA_DMA_INTR
* in some cases. Eg. DSM/TRIM, and perhaps others.
*/
mv_bmdma_stop_ap(ap);
if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
status = 0;
else
status = ATA_DMA_INTR;
}
return status;
}
static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
{
struct ata_taskfile *tf = &qc->tf;
/*
* Workaround for 88SX60x1 FEr SATA#24.
*
* Chip may corrupt WRITEs if multi_count >= 4kB.
* Note that READs are unaffected.
*
* It's not clear if this errata really means "4K bytes",
* or if it always happens for multi_count > 7
* regardless of device sector_size.
*
* So, for safety, any write with multi_count > 7
* gets converted here into a regular PIO write instead:
*/
if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
if (qc->dev->multi_count > 7) {
switch (tf->command) {
case ATA_CMD_WRITE_MULTI:
tf->command = ATA_CMD_PIO_WRITE;
break;
case ATA_CMD_WRITE_MULTI_FUA_EXT:
tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
/* fall through */
case ATA_CMD_WRITE_MULTI_EXT:
tf->command = ATA_CMD_PIO_WRITE_EXT;
break;
}
}
}
}
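/*
 * Worked example (hypothetical device): ATA_CMD_WRITE_MULTI issued to
 * a drive reporting multi_count = 16 exceeds the "> 7" limit above,
 * so the taskfile is rewritten to ATA_CMD_PIO_WRITE before issue;
 * READs, and writes with multi_count <= 7, pass through unchanged.
 */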
/**
* mv_qc_prep - Host specific command preparation.
* @qc: queued command to prepare
*
* This routine simply redirects to the general purpose routine
* if command is not DMA. Else, it handles prep of the CRQB
* (command request block), does some sanity checking, and calls
* the SG load routine.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mv_port_priv *pp = ap->private_data;
__le16 *cw;
struct ata_taskfile *tf = &qc->tf;
u16 flags = 0;
unsigned in_index;
switch (tf->protocol) {
case ATA_PROT_DMA:
if (tf->command == ATA_CMD_DSM)
return;
/* fall-thru */
case ATA_PROT_NCQ:
break; /* continue below */
case ATA_PROT_PIO:
mv_rw_multi_errata_sata24(qc);
return;
default:
return;
}
/* Fill in command request block
*/
if (!(tf->flags & ATA_TFLAG_WRITE))
flags |= CRQB_FLAG_READ;
WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
flags |= qc->tag << CRQB_TAG_SHIFT;
flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
/* get current queue index from software */
in_index = pp->req_idx;
pp->crqb[in_index].sg_addr =
cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
pp->crqb[in_index].sg_addr_hi =
cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
cw = &pp->crqb[in_index].ata_cmd[0];
/* Sadly, the CRQB cannot accommodate all registers--there are
* only 11 16-bit command words...so we must pick and choose required
* registers based on the command. So, we drop feature and
* hob_feature for [RW] DMA commands, but they are needed for
* NCQ. NCQ will drop hob_nsect, which is not needed there
* (nsect is used only for the tag; feat/hob_feat hold true nsect).
*/
switch (tf->command) {
case ATA_CMD_READ:
case ATA_CMD_READ_EXT:
case ATA_CMD_WRITE:
case ATA_CMD_WRITE_EXT:
case ATA_CMD_WRITE_FUA_EXT:
mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
break;
case ATA_CMD_FPDMA_READ:
case ATA_CMD_FPDMA_WRITE:
mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
break;
default:
/* The only other commands EDMA supports in non-queued and
* non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
* of which are defined/used by Linux. If we get here, this
* driver needs work.
*
* FIXME: modify libata to give qc_prep a return value and
* return error here.
*/
BUG_ON(tf->command);
break;
}
mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
return;
mv_fill_sg(qc);
}
/**
* mv_qc_prep_iie - Host specific command preparation.
* @qc: queued command to prepare
*
* This routine simply redirects to the general purpose routine
* if command is not DMA. Else, it handles prep of the CRQB
* (command request block), does some sanity checking, and calls
* the SG load routine.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mv_port_priv *pp = ap->private_data;
struct mv_crqb_iie *crqb;
struct ata_taskfile *tf = &qc->tf;
unsigned in_index;
u32 flags = 0;
if ((tf->protocol != ATA_PROT_DMA) &&
(tf->protocol != ATA_PROT_NCQ))
return;
if (tf->command == ATA_CMD_DSM)
return; /* use bmdma for this */
/* Fill in Gen IIE command request block */
if (!(tf->flags & ATA_TFLAG_WRITE))
flags |= CRQB_FLAG_READ;
WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
flags |= qc->tag << CRQB_TAG_SHIFT;
flags |= qc->tag << CRQB_HOSTQ_SHIFT;
flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
/* get current queue index from software */
in_index = pp->req_idx;
crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
crqb->flags = cpu_to_le32(flags);
crqb->ata_cmd[0] = cpu_to_le32(
(tf->command << 16) |
(tf->feature << 24)
);
crqb->ata_cmd[1] = cpu_to_le32(
(tf->lbal << 0) |
(tf->lbam << 8) |
(tf->lbah << 16) |
(tf->device << 24)
);
crqb->ata_cmd[2] = cpu_to_le32(
(tf->hob_lbal << 0) |
(tf->hob_lbam << 8) |
(tf->hob_lbah << 16) |
(tf->hob_feature << 24)
);
crqb->ata_cmd[3] = cpu_to_le32(
(tf->nsect << 0) |
(tf->hob_nsect << 8)
);
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
return;
mv_fill_sg(qc);
}
/**
* mv_sff_check_status - fetch device status, if valid
* @ap: ATA port to fetch status from
*
* When using command issue via mv_qc_issue_fis(),
* the initial ATA_BUSY state does not show up in the
* ATA status (shadow) register. This can confuse libata!
*
* So we have a hook here to fake ATA_BUSY for that situation,
* until the first time a BUSY, DRQ, or ERR bit is seen.
*
* The rest of the time, it simply returns the ATA status register.
*/
static u8 mv_sff_check_status(struct ata_port *ap)
{
u8 stat = ioread8(ap->ioaddr.status_addr);
struct mv_port_priv *pp = ap->private_data;
if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
else
stat = ATA_BUSY;
}
return stat;
}
/**
* mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
* @ap: port on which to send the FIS
* @fis: fis to be sent
* @nwords: number of 32-bit words in the fis
*/
static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
{
void __iomem *port_mmio = mv_ap_base(ap);
u32 ifctl, old_ifctl, ifstat;
int i, timeout = 200, final_word = nwords - 1;
/* Initiate FIS transmission mode */
old_ifctl = readl(port_mmio + SATA_IFCTL);
ifctl = 0x100 | (old_ifctl & 0xf);
writelfl(ifctl, port_mmio + SATA_IFCTL);
/* Send all words of the FIS except for the final word */
for (i = 0; i < final_word; ++i)
writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
/* Flag end-of-transmission, and then send the final word */
writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
/*
* Wait for FIS transmission to complete.
* This typically takes just a single iteration.
*/
do {
ifstat = readl(port_mmio + SATA_IFSTAT);
} while (!(ifstat & 0x1000) && --timeout);
/* Restore original port configuration */
writelfl(old_ifctl, port_mmio + SATA_IFCTL);
/* See if it worked */
if ((ifstat & 0x3000) != 0x1000) {
ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
__func__, ifstat);
return AC_ERR_OTHER;
}
return 0;
}
/**
* mv_qc_issue_fis - Issue a command directly as a FIS
* @qc: queued command to start
*
* Note that the ATA shadow registers are not updated
* after command issue, so the device will appear "READY"
* if polled, even while it is BUSY processing the command.
*
* So we use a status hook to fake ATA_BUSY until the drive changes state.
*
* Note: we don't get updated shadow regs on *completion*
* of non-data commands. So avoid sending them via this function,
* as they will appear to have completed immediately.
*
* GEN_IIE has special registers that we could get the result tf from,
* but earlier chipsets do not. For now, we ignore those registers.
*/
static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mv_port_priv *pp = ap->private_data;
struct ata_link *link = qc->dev->link;
u32 fis[5];
int err = 0;
ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
if (err)
return err;
switch (qc->tf.protocol) {
case ATAPI_PROT_PIO:
pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
/* fall through */
case ATAPI_PROT_NODATA:
ap->hsm_task_state = HSM_ST_FIRST;
break;
case ATA_PROT_PIO:
pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
if (qc->tf.flags & ATA_TFLAG_WRITE)
ap->hsm_task_state = HSM_ST_FIRST;
else
ap->hsm_task_state = HSM_ST;
break;
default:
ap->hsm_task_state = HSM_ST_LAST;
break;
}
if (qc->tf.flags & ATA_TFLAG_POLLING)
ata_sff_queue_pio_task(link, 0);
return 0;
}
/**
* mv_qc_issue - Initiate a command to the host
* @qc: queued command to start
*
* This routine simply redirects to the general purpose routine
* if the command is not DMA. Else, it sanity checks our local
* caches of the request producer/consumer indices then enables
* DMA and bumps the request producer index.
*
* LOCKING:
* Inherited from caller.
*/
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
static int limit_warnings = 10;
struct ata_port *ap = qc->ap;
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_port_priv *pp = ap->private_data;
u32 in_index;
unsigned int port_irqs;
pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
if (qc->tf.command == ATA_CMD_DSM) {
if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */
return AC_ERR_OTHER;
break; /* use bmdma for this */
}
/* fall through */
case ATA_PROT_NCQ:
mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
/* Write the request in pointer to kick the EDMA to life */
writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
port_mmio + EDMA_REQ_Q_IN_PTR);
return 0;
case ATA_PROT_PIO:
/*
* Errata SATA#16, SATA#24: warn if multiple DRQs expected.
*
* Someday, we might implement special polling workarounds
* for these, but it all seems rather unnecessary since we
* normally use only DMA for commands which transfer more
* than a single block of data.
*
* Much of the time, this could just work regardless.
* So for now, just log the incident, and allow the attempt.
*/
if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
--limit_warnings;
ata_link_warn(qc->dev->link, DRV_NAME
": attempting PIO w/multiple DRQ: "
"this may fail due to h/w errata\n");
}
/* fall through */
case ATA_PROT_NODATA:
case ATAPI_PROT_PIO:
case ATAPI_PROT_NODATA:
if (ap->flags & ATA_FLAG_PIO_POLLING)
qc->tf.flags |= ATA_TFLAG_POLLING;
break;
}
if (qc->tf.flags & ATA_TFLAG_POLLING)
port_irqs = ERR_IRQ; /* mask device interrupt when polling */
else
port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */
/*
* We're about to send a non-EDMA capable command to the
* port. Turn off EDMA so there won't be problems accessing
* shadow block, etc registers.
*/
mv_stop_edma(ap);
mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
mv_pmp_select(ap, qc->dev->link->pmp);
if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
struct mv_host_priv *hpriv = ap->host->private_data;
/*
* Workaround for 88SX60x1 FEr SATA#25 (part 2).
*
* After any NCQ error, the READ_LOG_EXT command
* from libata-eh *must* use mv_qc_issue_fis().
* Otherwise it might fail, due to chip errata.
*
* Rather than special-case it, we'll just *always*
* use this method here for READ_LOG_EXT, making for
* easier testing.
*/
if (IS_GEN_II(hpriv))
return mv_qc_issue_fis(qc);
}
return ata_bmdma_qc_issue(qc);
}
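/*
 * mv_get_active_qc - return the active non-polled qc, if any
 *
 * With NCQ enabled there can be multiple outstanding commands and no
 * single "active tag", so return NULL and leave completion to the
 * EDMA response path.  Polled commands are likewise left to the
 * SFF/PIO state machine.
 */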
static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{
struct mv_port_priv *pp = ap->private_data;
struct ata_queued_cmd *qc;
if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
return NULL;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
return qc;
return NULL;
}
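/*
 * mv_pmp_error_handler - PMP-aware error handler entry point
 *
 * If mv_handle_fbs_ncq_dev_err() armed a delayed-EH cycle, first run
 * NCQ error analysis on each failed PMP link recorded in
 * delayed_eh_pmp_map, then freeze the port before handing off to the
 * generic sata_pmp_error_handler().
 */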
static void mv_pmp_error_handler(struct ata_port *ap)
{
unsigned int pmp, pmp_map;
struct mv_port_priv *pp = ap->private_data;
if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
/*
* Perform NCQ error analysis on failed PMPs
* before we freeze the port entirely.
*
* The failed PMPs are marked earlier by mv_pmp_eh_prep().
*/
pmp_map = pp->delayed_eh_pmp_map;
pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
for (pmp = 0; pmp_map != 0; pmp++) {
unsigned int this_pmp = (1 << pmp);
if (pmp_map & this_pmp) {
struct ata_link *link = &ap->pmp_link[pmp];
pmp_map &= ~this_pmp;
ata_eh_analyze_ncq_error(link);
}
}
ata_port_freeze(ap);
}
sata_pmp_error_handler(ap);
}
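/*
 * The upper 16 bits of SATA_TESTCTL apparently latch a bitmap of PMP
 * links that saw a device error; mv_pmp_eh_prep() walks this bitmap.
 */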
static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
return readl(port_mmio + SATA_TESTCTL) >> 16;
}
static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
{
struct ata_eh_info *ehi;
unsigned int pmp;
/*
* Initialize EH info for PMPs which saw device errors
*/
ehi = &ap->link.eh_info;
for (pmp = 0; pmp_map != 0; pmp++) {
unsigned int this_pmp = (1 << pmp);
if (pmp_map & this_pmp) {
struct ata_link *link = &ap->pmp_link[pmp];
pmp_map &= ~this_pmp;
ehi = &link->eh_info;
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi, "dev err");
ehi->err_mask |= AC_ERR_DEV;
ehi->action |= ATA_EH_RESET;
ata_link_abort(link);
}
}
}
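/*
 * Compare the EDMA request queue in/out pointers: once the hardware's
 * consumer index has caught up with our producer index, no CRQBs
 * remain outstanding.
 */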
static int mv_req_q_empty(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
u32 in_ptr, out_ptr;
in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
return (in_ptr == out_ptr); /* 1 == queue_is_empty */
}
static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
{
struct mv_port_priv *pp = ap->private_data;
int failed_links;
unsigned int old_map, new_map;
/*
* Device error during FBS+NCQ operation:
*
* Set a port flag to prevent further I/O being enqueued.
* Leave the EDMA running to drain outstanding commands from this port.
* Perform the post-mortem/EH only when all responses are complete.
* Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
*/
if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
pp->delayed_eh_pmp_map = 0;
}
old_map = pp->delayed_eh_pmp_map;
new_map = old_map | mv_get_err_pmp_map(ap);
if (old_map != new_map) {
pp->delayed_eh_pmp_map = new_map;
mv_pmp_eh_prep(ap, new_map & ~old_map);
}
failed_links = hweight16(new_map);
ata_port_info(ap,
"%s: pmp_map=%04x qc_map=%04x failed_links=%d nr_active_links=%d\n",
__func__, pp->delayed_eh_pmp_map,
ap->qc_active, failed_links,
ap->nr_active_links);
if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
mv_process_crpb_entries(ap, pp);
mv_stop_edma(ap);
mv_eh_freeze(ap);
ata_port_info(ap, "%s: done\n", __func__);
return 1; /* handled */
}
ata_port_info(ap, "%s: waiting\n", __func__);
return 1; /* handled */
}
static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
{
/*
* Possible future enhancement:
*
* FBS+non-NCQ operation is not yet implemented.
* See related notes in mv_edma_cfg().
*
* Device error during FBS+non-NCQ operation:
*
* We need to snapshot the shadow registers for each failed command.
* Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
*/
return 0; /* not handled */
}
static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
{
struct mv_port_priv *pp = ap->private_data;
if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
return 0; /* EDMA was not active: not handled */
if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
return 0; /* FBS was not active: not handled */
if (!(edma_err_cause & EDMA_ERR_DEV))
return 0; /* non DEV error: not handled */
edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
return 0; /* other problems: not handled */
if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
/*
* EDMA should NOT have self-disabled for this case.
* If it did, then something is wrong elsewhere,
* and we cannot handle it here.
*/
if (edma_err_cause & EDMA_ERR_SELF_DIS) {
ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
__func__, edma_err_cause, pp->pp_flags);
return 0; /* not handled */
}
return mv_handle_fbs_ncq_dev_err(ap);
} else {
/*
* EDMA should have self-disabled for this case.
* If it did not, then something is wrong elsewhere,
* and we cannot handle it here.
*/
if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
__func__, edma_err_cause, pp->pp_flags);
return 0; /* not handled */
}
return mv_handle_fbs_non_ncq_dev_err(ap);
}
return 0; /* not handled */
}
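/*
 * mv_unexpected_intr - record and recover from a stray device interrupt
 *
 * Note in the EH description what the port was doing at the time
 * (idle, EDMA enabled, or polling), then freeze the port so that EH
 * resets it.
 */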
static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
{
struct ata_eh_info *ehi = &ap->link.eh_info;
char *when = "idle";
ata_ehi_clear_desc(ehi);
if (edma_was_enabled) {
when = "EDMA enabled";
} else {
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
when = "polling";
}
ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
ehi->err_mask |= AC_ERR_OTHER;
ehi->action |= ATA_EH_RESET;
ata_port_freeze(ap);
}
/**
* mv_err_intr - Handle error interrupts on the port
* @ap: ATA channel to manipulate
*
* Most cases require a full reset of the chip's state machine,
* which also performs a COMRESET.
* Also, if the port disabled DMA, update our cached copy to match.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_err_intr(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
u32 edma_err_cause, eh_freeze_mask, serr = 0;
u32 fis_cause = 0;
struct mv_port_priv *pp = ap->private_data;
struct mv_host_priv *hpriv = ap->host->private_data;
unsigned int action = 0, err_mask = 0;
struct ata_eh_info *ehi = &ap->link.eh_info;
struct ata_queued_cmd *qc;
int abort = 0;
/*
* Read and clear the SError and err_cause bits.
* For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
* the FIS_IRQ_CAUSE register before clearing edma_err_cause.
*/
sata_scr_read(&ap->link, SCR_ERROR, &serr);
sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
}
writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
if (edma_err_cause & EDMA_ERR_DEV) {
/*
* Device errors during FIS-based switching operation
* require special handling.
*/
if (mv_handle_dev_err(ap, edma_err_cause))
return;
}
qc = mv_get_active_qc(ap);
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
edma_err_cause, pp->pp_flags);
if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
if (fis_cause & FIS_IRQ_CAUSE_AN) {
u32 ec = edma_err_cause &
~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
sata_async_notification(ap);
if (!ec)
return; /* Just an AN; no need for the nukes */
ata_ehi_push_desc(ehi, "SDB notify");
}
}
/*
* All generations share these EDMA error cause bits:
*/
if (edma_err_cause & EDMA_ERR_DEV) {
err_mask |= AC_ERR_DEV;
action |= ATA_EH_RESET;
ata_ehi_push_desc(ehi, "dev error");
}
if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
EDMA_ERR_INTRL_PAR)) {
err_mask |= AC_ERR_ATA_BUS;
action |= ATA_EH_RESET;
ata_ehi_push_desc(ehi, "parity error");
}
if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
ata_ehi_hotplugged(ehi);
ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
"dev disconnect" : "dev connect");
action |= ATA_EH_RESET;
}
/*
* Gen-I has a different SELF_DIS bit,
* different FREEZE bits, and no SERR bit:
*/
if (IS_GEN_I(hpriv)) {
eh_freeze_mask = EDMA_EH_FREEZE_5;
if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
ata_ehi_push_desc(ehi, "EDMA self-disable");
}
} else {
eh_freeze_mask = EDMA_EH_FREEZE;
if (edma_err_cause & EDMA_ERR_SELF_DIS) {
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
ata_ehi_push_desc(ehi, "EDMA self-disable");
}
if (edma_err_cause & EDMA_ERR_SERR) {
ata_ehi_push_desc(ehi, "SError=%08x", serr);
err_mask |= AC_ERR_ATA_BUS;
action |= ATA_EH_RESET;
}
}
if (!err_mask) {
err_mask = AC_ERR_OTHER;
action |= ATA_EH_RESET;
}
ehi->serror |= serr;
ehi->action |= action;
if (qc)
qc->err_mask |= err_mask;
else
ehi->err_mask |= err_mask;
if (err_mask == AC_ERR_DEV) {
/*
* Cannot do ata_port_freeze() here,
* because it would kill PIO access,
* which is needed for further diagnosis.
*/
mv_eh_freeze(ap);
abort = 1;
} else if (edma_err_cause & eh_freeze_mask) {
/*
* Note to self: ata_port_freeze() calls ata_port_abort()
*/
ata_port_freeze(ap);
} else {
abort = 1;
}
if (abort) {
if (qc)
ata_link_abort(qc->dev->link);
else
ata_port_abort(ap);
}
}
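/*
 * mv_process_crpb_response - examine a single response queue entry
 *
 * Returns true if the command completed cleanly and can be marked
 * done; returns false when the error must instead be dissected by
 * mv_err_intr().
 */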
static bool mv_process_crpb_response(struct ata_port *ap,
struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{
u8 ata_status;
u16 edma_status = le16_to_cpu(response->flags);
/*
* edma_status from a response queue entry:
* LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
* MSB is saved ATA status from command completion.
*/
if (!ncq_enabled) {
u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
if (err_cause) {
/*
* Error will be seen/handled by
* mv_err_intr(). So do nothing at all here.
*/
return false;
}
}
ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
if (!ac_err_mask(ata_status))
return true;
/* else: leave it for mv_err_intr() */
return false;
}
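/*
 * mv_process_crpb_entries - drain the EDMA response queue
 *
 * Walk the CRPB ring from our cached resp_idx up to the hardware's
 * in-pointer, accumulate a completion mask, then acknowledge the
 * consumed entries by writing the new out-pointer back to the chip.
 */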
static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_host_priv *hpriv = ap->host->private_data;
u32 in_index;
bool work_done = false;
u32 done_mask = 0;
int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
/* Get the hardware queue position index */
in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
/* Process new responses received since the last time we looked */
while (in_index != pp->resp_idx) {
unsigned int tag;
struct mv_crpb *response = &pp->crpb[pp->resp_idx];
pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
if (IS_GEN_I(hpriv)) {
/* 50xx: no NCQ, only one command active at a time */
tag = ap->link.active_tag;
} else {
/* Gen II/IIE: get command tag from CRPB entry */
tag = le16_to_cpu(response->id) & 0x1f;
}
if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
done_mask |= 1 << tag;
work_done = true;
}
if (work_done) {
ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
/* Update the software queue position index in hardware */
writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
(pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
port_mmio + EDMA_RSP_Q_OUT_PTR);
}
}
static void mv_port_intr(struct ata_port *ap, u32 port_cause)
{
struct mv_port_priv *pp;
int edma_was_enabled;
/*
* Grab a snapshot of the EDMA_EN flag setting,
* so that we have a consistent view for this port,
* even if one of the routines we call changes it.
*/
pp = ap->private_data;
edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
/*
* Process completed CRPB response(s) before other events.
*/
if (edma_was_enabled && (port_cause & DONE_IRQ)) {
mv_process_crpb_entries(ap, pp);
if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
mv_handle_fbs_ncq_dev_err(ap);
}
/*
* Handle chip-reported errors, or continue on to handle PIO.
*/
if (unlikely(port_cause & ERR_IRQ)) {
mv_err_intr(ap);
} else if (!edma_was_enabled) {
struct ata_queued_cmd *qc = mv_get_active_qc(ap);
if (qc)
ata_bmdma_port_intr(ap, qc);
else
mv_unexpected_intr(ap, edma_was_enabled);
}
}
/**
* mv_host_intr - Handle all interrupts on the given host controller
* @host: host specific structure
* @main_irq_cause: Main interrupt cause register for the chip.
*
* LOCKING:
* Inherited from caller.
*/
static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
{
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base, *hc_mmio;
unsigned int handled = 0, port;
/* If asserted, clear the "all ports" IRQ coalescing bit */
if (main_irq_cause & ALL_PORTS_COAL_DONE)
writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
for (port = 0; port < hpriv->n_ports; port++) {
struct ata_port *ap = host->ports[port];
unsigned int p, shift, hardport, port_cause;
MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
/*
* Each hc within the host has its own hc_irq_cause register,
* where the interrupting ports' bits get ack'd.
*/
if (hardport == 0) { /* first port on this hc ? */
u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
u32 port_mask, ack_irqs;
/*
* Skip this entire hc if nothing pending for any ports
*/
if (!hc_cause) {
port += MV_PORTS_PER_HC - 1;
continue;
}
/*
* We don't need/want to read the hc_irq_cause register,
* because doing so hurts performance, and
* main_irq_cause already gives us everything we need.
*
* But we do have to *write* to the hc_irq_cause to ack
* the ports that we are handling this time through.
*
* This requires that we create a bitmap for those
* ports which interrupted us, and use that bitmap
* to ack (only) those ports via hc_irq_cause.
*/
ack_irqs = 0;
if (hc_cause & PORTS_0_3_COAL_DONE)
ack_irqs = HC_COAL_IRQ;
for (p = 0; p < MV_PORTS_PER_HC; ++p) {
if ((port + p) >= hpriv->n_ports)
break;
port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
if (hc_cause & port_mask)
ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
}
hc_mmio = mv_hc_base_from_port(mmio, port);
writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
handled = 1;
}
/*
* Handle interrupts signalled for this port:
*/
port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
if (port_cause)
mv_port_intr(ap, port_cause);
}
return handled;
}
static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
struct mv_host_priv *hpriv = host->private_data;
struct ata_port *ap;
struct ata_queued_cmd *qc;
struct ata_eh_info *ehi;
unsigned int i, err_mask, printed = 0;
u32 err_cause;
err_cause = readl(mmio + hpriv->irq_cause_offset);
dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
DPRINTK("All regs @ PCI error\n");
mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
writelfl(0, mmio + hpriv->irq_cause_offset);
for (i = 0; i < host->n_ports; i++) {
ap = host->ports[i];
if (!ata_link_offline(&ap->link)) {
ehi = &ap->link.eh_info;
ata_ehi_clear_desc(ehi);
if (!printed++)
ata_ehi_push_desc(ehi,
"PCI err cause 0x%08x", err_cause);
err_mask = AC_ERR_HOST_BUS;
ehi->action = ATA_EH_RESET;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc)
qc->err_mask |= err_mask;
else
ehi->err_mask |= err_mask;
ata_port_freeze(ap);
}
}
return 1; /* handled */
}
/**
* mv_interrupt - Main interrupt event handler
* @irq: unused
* @dev_instance: private data; in this case the host structure
*
* Read the read-only register to determine if any host
* controllers have pending interrupts. If so, call lower level
* routine to handle. Also check for PCI errors which are only
* reported here.
*
* LOCKING:
* This routine holds the host lock while processing pending
* interrupts.
*/
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct mv_host_priv *hpriv = host->private_data;
unsigned int handled = 0;
int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
u32 main_irq_cause, pending_irqs;
spin_lock(&host->lock);
/* for MSI: block new interrupts while in here */
if (using_msi)
mv_write_main_irq_mask(0, hpriv);
main_irq_cause = readl(hpriv->main_irq_cause_addr);
pending_irqs = main_irq_cause & hpriv->main_irq_mask;
/*
* Deal with cases where we either have nothing pending, or have read
* a bogus register value which can indicate HW removal or PCI fault.
*/
if (pending_irqs && main_irq_cause != 0xffffffffU) {
if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
handled = mv_pci_error(host, hpriv->base);
else
handled = mv_host_intr(host, pending_irqs);
}
/* for MSI: unmask; interrupt cause bits will retrigger now */
if (using_msi)
mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
}
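/*
 * mv5_scr_offset - map an SCR register index to its 50xx PHY-block offset
 *
 * The 50xx parts keep the SCR registers contiguous, so the mapping is
 * simply sc_reg_in * sizeof(u32):
 *   SCR_STATUS  (0) -> 0x0
 *   SCR_ERROR   (1) -> 0x4
 *   SCR_CONTROL (2) -> 0x8
 * Anything else yields the 0xffffffffU "invalid offset" sentinel.
 */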
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
unsigned int ofs;
switch (sc_reg_in) {
case SCR_STATUS:
case SCR_ERROR:
case SCR_CONTROL:
ofs = sc_reg_in * sizeof(u32);
break;
default:
ofs = 0xffffffffU;
break;
}
return ofs;
}
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
struct mv_host_priv *hpriv = link->ap->host->private_data;
void __iomem *mmio = hpriv->base;
void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU) {
*val = readl(addr + ofs);
return 0;
} else
return -EINVAL;
}
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
struct mv_host_priv *hpriv = link->ap->host->private_data;
void __iomem *mmio = hpriv->base;
void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU) {
writelfl(val, addr + ofs);
return 0;
} else
return -EINVAL;
}
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
int early_5080;
early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
if (!early_5080) {
u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
tmp |= (1 << 0);
writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
mv_reset_pci_bus(host, mmio);
}
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
writel(0x0fcfffff, mmio + FLASH_CTL);
}
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio)
{
void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
u32 tmp;
tmp = readl(phy_mmio + MV5_PHY_MODE);
hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
}
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
u32 tmp;
writel(0, mmio + GPIO_PORT_CTL);
/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
tmp |= ~(1 << 0);
writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port)
{
void __iomem *phy_mmio = mv5_phy_base(mmio, port);
const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
u32 tmp;
int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
if (fix_apm_sq) {
tmp = readl(phy_mmio + MV5_LTMODE);
tmp |= (1 << 19);
writel(tmp, phy_mmio + MV5_LTMODE);
tmp = readl(phy_mmio + MV5_PHY_CTL);
tmp &= ~0x3;
tmp |= 0x1;
writel(tmp, phy_mmio + MV5_PHY_CTL);
}
tmp = readl(phy_mmio + MV5_PHY_MODE);
tmp &= ~mask;
tmp |= hpriv->signal[port].pre;
tmp |= hpriv->signal[port].amps;
writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port)
{
void __iomem *port_mmio = mv_port_base(mmio, port);
mv_reset_channel(hpriv, mmio, port);
ZERO(0x028); /* command */
writel(0x11f, port_mmio + EDMA_CFG);
ZERO(0x004); /* timer */
ZERO(0x008); /* irq err cause */
ZERO(0x00c); /* irq err mask */
ZERO(0x010); /* rq bah */
ZERO(0x014); /* rq inp */
ZERO(0x018); /* rq outp */
ZERO(0x01c); /* respq bah */
ZERO(0x024); /* respq outp */
ZERO(0x020); /* respq inp */
ZERO(0x02c); /* test control */
writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int hc)
{
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
u32 tmp;
ZERO(0x00c);
ZERO(0x010);
ZERO(0x014);
ZERO(0x018);
tmp = readl(hc_mmio + 0x20);
tmp &= 0x1c1c1c1c;
tmp |= 0x03030303;
writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int n_hc)
{
unsigned int hc, port;
for (hc = 0; hc < n_hc; hc++) {
for (port = 0; port < MV_PORTS_PER_HC; port++)
mv5_reset_hc_port(hpriv, mmio,
(hc * MV_PORTS_PER_HC) + port);
mv5_reset_one_hc(hpriv, mmio, hc);
}
return 0;
}
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
struct mv_host_priv *hpriv = host->private_data;
u32 tmp;
tmp = readl(mmio + MV_PCI_MODE);
tmp &= 0xff00ffff;
writel(tmp, mmio + MV_PCI_MODE);
ZERO(MV_PCI_DISC_TIMER);
ZERO(MV_PCI_MSI_TRIGGER);
writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
ZERO(MV_PCI_SERR_MASK);
ZERO(hpriv->irq_cause_offset);
ZERO(hpriv->irq_mask_offset);
ZERO(MV_PCI_ERR_LOW_ADDRESS);
ZERO(MV_PCI_ERR_HIGH_ADDRESS);
ZERO(MV_PCI_ERR_ATTRIBUTE);
ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
u32 tmp;
mv5_reset_flash(hpriv, mmio);
tmp = readl(mmio + GPIO_PORT_CTL);
tmp &= 0x3;
tmp |= (1 << 5) | (1 << 6);
writel(tmp, mmio + GPIO_PORT_CTL);
}
/**
* mv6_reset_hc - Perform the 6xxx global soft reset
* @hpriv: host private data
* @mmio: base address of the HBA
* @n_hc: number of host controllers to reset
*
* This routine only applies to 6xxx parts.
*
* LOCKING:
* Inherited from caller.
*/
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int n_hc)
{
void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
int i, rc = 0;
u32 t;
/* Following the procedure defined in the PCI "main command and status
* register" table.
*/
t = readl(reg);
writel(t | STOP_PCI_MASTER, reg);
for (i = 0; i < 1000; i++) {
udelay(1);
t = readl(reg);
if (PCI_MASTER_EMPTY & t)
break;
}
if (!(PCI_MASTER_EMPTY & t)) {
printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
rc = 1;
goto done;
}
/* set reset */
i = 5;
do {
writel(t | GLOB_SFT_RST, reg);
t = readl(reg);
udelay(1);
} while (!(GLOB_SFT_RST & t) && (i-- > 0));
if (!(GLOB_SFT_RST & t)) {
printk(KERN_ERR DRV_NAME ": can't set global reset\n");
rc = 1;
goto done;
}
/* clear reset and *reenable the PCI master* (not mentioned in spec) */
i = 5;
do {
writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
t = readl(reg);
udelay(1);
} while ((GLOB_SFT_RST & t) && (i-- > 0));
if (GLOB_SFT_RST & t) {
printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
rc = 1;
}
done:
return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio)
{
void __iomem *port_mmio;
u32 tmp;
tmp = readl(mmio + RESET_CFG);
if ((tmp & (1 << 0)) == 0) {
hpriv->signal[idx].amps = 0x7 << 8;
hpriv->signal[idx].pre = 0x1 << 5;
return;
}
port_mmio = mv_port_base(mmio, idx);
tmp = readl(port_mmio + PHY_MODE2);
hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
}
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
writel(0x00000060, mmio + GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port)
{
void __iomem *port_mmio = mv_port_base(mmio, port);
u32 hp_flags = hpriv->hp_flags;
int fix_phy_mode2 =
hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
int fix_phy_mode4 =
hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
u32 m2, m3;
if (fix_phy_mode2) {
m2 = readl(port_mmio + PHY_MODE2);
m2 &= ~(1 << 16);
m2 |= (1 << 31);
writel(m2, port_mmio + PHY_MODE2);
udelay(200);
m2 = readl(port_mmio + PHY_MODE2);
m2 &= ~((1 << 16) | (1 << 31));
writel(m2, port_mmio + PHY_MODE2);
udelay(200);
}
/*
* Gen-II/IIe PHY_MODE3 errata RM#2:
* Achieves better receiver noise performance than the h/w default:
*/
m3 = readl(port_mmio + PHY_MODE3);
m3 = (m3 & 0x1f) | (0x5555601 << 5);
/* Guideline 88F5182 (GL# SATA-S11) */
if (IS_SOC(hpriv))
m3 &= ~0x1c;
if (fix_phy_mode4) {
u32 m4 = readl(port_mmio + PHY_MODE4);
/*
* Enforce reserved-bit restrictions on GenIIe devices only.
* For earlier chipsets, force only the internal config field
* (workaround for errata FEr SATA#10 part 1).
*/
if (IS_GEN_IIE(hpriv))
m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
else
m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
writel(m4, port_mmio + PHY_MODE4);
}
/*
* Workaround for 60x1-B2 errata SATA#13:
* Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
* so we must always rewrite PHY_MODE3 after PHY_MODE4.
* Or ensure we use writelfl() when writing PHY_MODE4.
*/
writel(m3, port_mmio + PHY_MODE3);
/* Revert values of pre-emphasis and signal amps to the saved ones */
m2 = readl(port_mmio + PHY_MODE2);
m2 &= ~MV_M2_PREAMP_MASK;
m2 |= hpriv->signal[port].amps;
m2 |= hpriv->signal[port].pre;
m2 &= ~(1 << 16);
/* according to mvSata 3.6.1, some IIE values are fixed */
if (IS_GEN_IIE(hpriv)) {
m2 &= ~0xC30FF01F;
m2 |= 0x0000900F;
}
writel(m2, port_mmio + PHY_MODE2);
}
/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
void __iomem *mmio)
{
return;
}
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio)
{
void __iomem *port_mmio;
u32 tmp;
port_mmio = mv_port_base(mmio, idx);
tmp = readl(port_mmio + PHY_MODE2);
hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
}
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
void __iomem *mmio, unsigned int port)
{
void __iomem *port_mmio = mv_port_base(mmio, port);
mv_reset_channel(hpriv, mmio, port);
ZERO(0x028); /* command */
writel(0x101f, port_mmio + EDMA_CFG);
ZERO(0x004); /* timer */
ZERO(0x008); /* irq err cause */
ZERO(0x00c); /* irq err mask */
ZERO(0x010); /* rq bah */
ZERO(0x014); /* rq inp */
ZERO(0x018); /* rq outp */
ZERO(0x01c); /* respq bah */
ZERO(0x024); /* respq outp */
ZERO(0x020); /* respq inp */
ZERO(0x02c); /* test control */
writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
void __iomem *mmio)
{
void __iomem *hc_mmio = mv_hc_base(mmio, 0);
ZERO(0x00c);
ZERO(0x010);
ZERO(0x014);
}
#undef ZERO
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
void __iomem *mmio, unsigned int n_hc)
{
unsigned int port;
for (port = 0; port < hpriv->n_ports; port++)
mv_soc_reset_hc_port(hpriv, mmio, port);
mv_soc_reset_one_hc(hpriv, mmio);
return 0;
}
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
void __iomem *mmio)
{
return;
}
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
return;
}
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
void __iomem *mmio, unsigned int port)
{
void __iomem *port_mmio = mv_port_base(mmio, port);
u32 reg;
reg = readl(port_mmio + PHY_MODE3);
reg &= ~(0x3 << 27); /* SELMUPF (bits 28:27) to 1 */
reg |= (0x1 << 27);
reg &= ~(0x3 << 29); /* SELMUPI (bits 30:29) to 1 */
reg |= (0x1 << 29);
writel(reg, port_mmio + PHY_MODE3);
reg = readl(port_mmio + PHY_MODE4);
reg &= ~0x1; /* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
reg |= (0x1 << 16);
writel(reg, port_mmio + PHY_MODE4);
reg = readl(port_mmio + PHY_MODE9_GEN2);
reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */
reg |= 0x8;
reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */
writel(reg, port_mmio + PHY_MODE9_GEN2);
reg = readl(port_mmio + PHY_MODE9_GEN1);
reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */
reg |= 0x8;
reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */
writel(reg, port_mmio + PHY_MODE9_GEN1);
}
/**
 * soc_is_65n - check if the SoC is a 65nm device
 *
 * Detect the type of the SoC by reading the PHYCFG_OFS register.
 * This register exists only in the 65nm devices and should contain a
 * non-zero value there; reading it on older devices returns 0.
 */
static bool soc_is_65n(struct mv_host_priv *hpriv)
{
void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
if (readl(port0_mmio + PHYCFG_OFS))
return true;
return false;
}
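/*
 * mv_setup_ifcfg - program the port's SATA interface configuration
 *
 * Applies the magic value called for by the chip spec, optionally
 * setting bit 7 to enable gen2i (3.0 Gb/s) link speed.
 */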
static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
u32 ifcfg = readl(port_mmio + SATA_IFCFG);
ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
if (want_gen2i)
ifcfg |= (1 << 7); /* enable gen2i speed */
writelfl(ifcfg, port_mmio + SATA_IFCFG);
}
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port_no)
{
void __iomem *port_mmio = mv_port_base(mmio, port_no);
/*
* The datasheet warns against setting EDMA_RESET when EDMA is active
* (but doesn't say what the problem might be). So we first try
* to disable the EDMA engine before doing the EDMA_RESET operation.
*/
mv_stop_edma_engine(port_mmio);
writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
if (!IS_GEN_I(hpriv)) {
/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
mv_setup_ifcfg(port_mmio, 1);
}
/*
* Strobing EDMA_RESET here causes a hard reset of the SATA transport,
* link, and physical layers. It resets all SATA interface registers
* (except for SATA_IFCFG), and issues a COMRESET to the dev.
*/
writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
udelay(25); /* allow reset propagation */
writelfl(0, port_mmio + EDMA_CMD);
hpriv->ops->phy_errata(hpriv, mmio, port_no);
if (IS_GEN_I(hpriv))
mdelay(1);
}
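/*
 * mv_pmp_select - steer the port's SATA interface at a given PMP link
 *
 * The low nibble of SATA_IFCTL selects the PMP target; rewrite it
 * only when it actually changes, sparing a flushing write otherwise.
 */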
static void mv_pmp_select(struct ata_port *ap, int pmp)
{
if (sata_pmp_supported(ap)) {
void __iomem *port_mmio = mv_ap_base(ap);
u32 reg = readl(port_mmio + SATA_IFCTL);
int old = reg & 0xf;
if (old != pmp) {
reg = (reg & ~0xf) | pmp;
writelfl(reg, port_mmio + SATA_IFCTL);
}
}
}
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
mv_pmp_select(link->ap, sata_srst_pmp(link));
return sata_std_hardreset(link, class, deadline);
}
static int mv_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
mv_pmp_select(link->ap, sata_srst_pmp(link));
return ata_sff_softreset(link, class, deadline);
}
static int mv_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct mv_host_priv *hpriv = ap->host->private_data;
struct mv_port_priv *pp = ap->private_data;
void __iomem *mmio = hpriv->base;
int rc, attempts = 0, extra = 0;
u32 sstatus;
bool online;
mv_reset_channel(hpriv, mmio, ap->port_no);
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
pp->pp_flags &=
~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
/* Workaround for errata FEr SATA#10 (part 2) */
do {
const unsigned long *timing =
sata_ehc_deb_timing(&link->eh_context);
rc = sata_link_hardreset(link, timing, deadline + extra,
&online, NULL);
rc = online ? -EAGAIN : rc;
if (rc)
return rc;
sata_scr_read(link, SCR_STATUS, &sstatus);
if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
/* Force 1.5gb/s link speed and try again */
mv_setup_ifcfg(mv_ap_base(ap), 0);
if (time_after(jiffies + HZ, deadline))
extra = HZ; /* only extend it once, max */
}
} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
mv_save_cached_regs(ap);
mv_edma_cfg(ap, 0, 0);
return rc;
}
static void mv_eh_freeze(struct ata_port *ap)
{
mv_stop_edma(ap);
mv_enable_port_irqs(ap, 0);
}
static void mv_eh_thaw(struct ata_port *ap)
{
struct mv_host_priv *hpriv = ap->host->private_data;
unsigned int port = ap->port_no;
unsigned int hardport = mv_hardport_from_port(port);
void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
void __iomem *port_mmio = mv_ap_base(ap);
u32 hc_irq_cause;
/* clear EDMA errors on this port */
writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
/* clear pending irq events */
hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
mv_enable_port_irqs(ap, ERR_IRQ);
}
/**
* mv_port_init - Perform some early initialization on a single port.
* @port: libata data structure storing shadow register addresses
* @port_mmio: base address of the port
*
* Initialize shadow register mmio addresses, clear outstanding
* interrupts on the port, and unmask interrupts for the future
* start of the port.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
/* PIO related setup
*/
port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
port->error_addr =
port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
port->status_addr =
port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
/* special case: control/altstatus doesn't have ATA_REG_ address */
port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
/* Clear any currently outstanding port interrupt conditions */
serr = port_mmio + mv_scr_offset(SCR_ERROR);
writelfl(readl(serr), serr);
writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
/* unmask all non-transient EDMA error interrupts */
writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
readl(port_mmio + EDMA_CFG),
readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
readl(port_mmio + EDMA_ERR_IRQ_MASK));
}
static unsigned int mv_in_pcix_mode(struct ata_host *host)
{
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base;
u32 reg;
if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
return 0; /* not PCI-X capable */
reg = readl(mmio + MV_PCI_MODE);
if ((reg & MV_PCI_MODE_MASK) == 0)
return 0; /* conventional PCI mode */
return 1; /* chip is in PCI-X mode */
}
static int mv_pci_cut_through_okay(struct ata_host *host)
{
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base;
u32 reg;
if (!mv_in_pcix_mode(host)) {
reg = readl(mmio + MV_PCI_COMMAND);
if (reg & MV_PCI_COMMAND_MRDTRIG)
return 0; /* not okay */
}
return 1; /* okay */
}
static void mv_60x1b2_errata_pci7(struct ata_host *host)
{
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base;
/* workaround for 60x1-B2 errata PCI#7 */
if (mv_in_pcix_mode(host)) {
u32 reg = readl(mmio + MV_PCI_COMMAND);
writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
}
}
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
struct mv_host_priv *hpriv = host->private_data;
u32 hp_flags = hpriv->hp_flags;
switch (board_idx) {
case chip_5080:
hpriv->ops = &mv5xxx_ops;
hp_flags |= MV_HP_GEN_I;
switch (pdev->revision) {
case 0x1:
hp_flags |= MV_HP_ERRATA_50XXB0;
break;
case 0x3:
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
default:
dev_warn(&pdev->dev,
"Applying 50XXB2 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
}
break;
case chip_504x:
case chip_508x:
hpriv->ops = &mv5xxx_ops;
hp_flags |= MV_HP_GEN_I;
switch (pdev->revision) {
case 0x0:
hp_flags |= MV_HP_ERRATA_50XXB0;
break;
case 0x3:
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
default:
dev_warn(&pdev->dev,
"Applying B2 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
}
break;
case chip_604x:
case chip_608x:
hpriv->ops = &mv6xxx_ops;
hp_flags |= MV_HP_GEN_II;
switch (pdev->revision) {
case 0x7:
mv_60x1b2_errata_pci7(host);
hp_flags |= MV_HP_ERRATA_60X1B2;
break;
case 0x9:
hp_flags |= MV_HP_ERRATA_60X1C0;
break;
default:
dev_warn(&pdev->dev,
"Applying B2 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_60X1B2;
break;
}
break;
case chip_7042:
hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
if (pdev->vendor == PCI_VENDOR_ID_TTI &&
(pdev->device == 0x2300 || pdev->device == 0x2310))
{
/*
* Highpoint RocketRAID PCIe 23xx series cards:
*
* Unconfigured drives are treated as "Legacy"
* by the BIOS, and it overwrites sector 8 with
* a "Lgcy" metadata block prior to Linux boot.
*
* Configured drives (RAID or JBOD) leave sector 8
* alone, but instead overwrite a high numbered
* sector for the RAID metadata. This sector can
* be determined exactly, by truncating the physical
* drive capacity to a nice even GB value.
*
* RAID metadata is at: (dev->n_sectors & ~0xfffff)
*
* Warn the user, lest they think we're just buggy.
*/
printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
" BIOS CORRUPTS DATA on all attached drives,"
" regardless of if/how they are configured."
" BEWARE!\n");
printk(KERN_WARNING DRV_NAME ": For data safety, do not"
" use sectors 8-9 on \"Legacy\" drives,"
" and avoid the final two gigabytes on"
" all RocketRAID BIOS initialized drives.\n");
}
/* fall through */
case chip_6042:
hpriv->ops = &mv6xxx_ops;
hp_flags |= MV_HP_GEN_IIE;
if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
hp_flags |= MV_HP_CUT_THROUGH;
switch (pdev->revision) {
case 0x2: /* Rev.B0: the first/only public release */
hp_flags |= MV_HP_ERRATA_60X1C0;
break;
default:
dev_warn(&pdev->dev,
"Applying 60X1C0 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_60X1C0;
break;
}
break;
case chip_soc:
if (soc_is_65n(hpriv))
hpriv->ops = &mv_soc_65n_ops;
else
hpriv->ops = &mv_soc_ops;
hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
MV_HP_ERRATA_60X1C0;
break;
default:
dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
return 1;
}
hpriv->hp_flags = hp_flags;
if (hp_flags & MV_HP_PCIE) {
hpriv->irq_cause_offset = PCIE_IRQ_CAUSE;
hpriv->irq_mask_offset = PCIE_IRQ_MASK;
hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
} else {
hpriv->irq_cause_offset = PCI_IRQ_CAUSE;
hpriv->irq_mask_offset = PCI_IRQ_MASK;
hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
}
return 0;
}
/**
* mv_init_host - Perform some early initialization of the host.
* @host: ATA host to initialize
*
* If possible, do an early global reset of the host. Then do
* our port init and clear/unmask all/relevant host interrupts.
*
* LOCKING:
* Inherited from caller.
*/
static int mv_init_host(struct ata_host *host)
{
int rc = 0, n_hc, port, hc;
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base;
rc = mv_chip_id(host, hpriv->board_idx);
if (rc)
goto done;
if (IS_SOC(hpriv)) {
hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK;
} else {
hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK;
}
/* initialize shadow irq mask with register's value */
hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
/* global interrupt mask: 0 == mask everything */
mv_set_main_irq_mask(host, ~0, 0);
n_hc = mv_get_hc_count(host->ports[0]->flags);
for (port = 0; port < host->n_ports; port++)
if (hpriv->ops->read_preamp)
hpriv->ops->read_preamp(hpriv, port, mmio);
rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
if (rc)
goto done;
hpriv->ops->reset_flash(hpriv, mmio);
hpriv->ops->reset_bus(host, mmio);
hpriv->ops->enable_leds(hpriv, mmio);
for (port = 0; port < host->n_ports; port++) {
struct ata_port *ap = host->ports[port];
void __iomem *port_mmio = mv_port_base(mmio, port);
mv_port_init(&ap->ioaddr, port_mmio);
}
for (hc = 0; hc < n_hc; hc++) {
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
"(before clear)=0x%08x\n", hc,
readl(hc_mmio + HC_CFG),
readl(hc_mmio + HC_IRQ_CAUSE));
/* Clear any currently outstanding hc interrupt conditions */
writelfl(0, hc_mmio + HC_IRQ_CAUSE);
}
if (!IS_SOC(hpriv)) {
/* Clear any currently outstanding host interrupt conditions */
writelfl(0, mmio + hpriv->irq_cause_offset);
/* and unmask interrupt generation for host regs */
writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
}
/*
* enable only global host interrupts for now.
* The per-port interrupts get done later as ports are set up.
*/
mv_set_main_irq_mask(host, 0, PCI_ERR);
mv_set_irq_coalescing(host, irq_coalescing_io_count,
irq_coalescing_usecs);
done:
return rc;
}
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
MV_CRQB_Q_SZ, 0);
if (!hpriv->crqb_pool)
return -ENOMEM;
hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
MV_CRPB_Q_SZ, 0);
if (!hpriv->crpb_pool)
return -ENOMEM;
hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
MV_SG_TBL_SZ, 0);
if (!hpriv->sg_tbl_pool)
return -ENOMEM;
return 0;
}
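/*
 * mv_conf_mbus_windows - program the SoC's MBUS address decode windows
 *
 * Disable all four windows first, then open one window per DRAM
 * chip-select: size mask, attributes, and target id (plus the enable
 * bit) go into WINDOW_CTRL, the base address into WINDOW_BASE.
 */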
static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
struct mbus_dram_target_info *dram)
{
int i;
for (i = 0; i < 4; i++) {
writel(0, hpriv->base + WINDOW_CTRL(i));
writel(0, hpriv->base + WINDOW_BASE(i));
}
for (i = 0; i < dram->num_cs; i++) {
struct mbus_dram_window *cs = dram->cs + i;
writel(((cs->size - 1) & 0xffff0000) |
(cs->mbus_attr << 8) |
(dram->mbus_dram_target_id << 4) | 1,
hpriv->base + WINDOW_CTRL(i));
writel(cs->base, hpriv->base + WINDOW_BASE(i));
}
}
/**
* mv_platform_probe - handle a positive probe of an SoC Marvell host
* @pdev: platform device found
*
* LOCKING:
* Inherited from caller.
*/
static int mv_platform_probe(struct platform_device *pdev)
{
const struct mv_sata_platform_data *mv_platform_data;
const struct ata_port_info *ppi[] =
{ &mv_port_info[chip_soc], NULL };
struct ata_host *host;
struct mv_host_priv *hpriv;
struct resource *res;
int n_ports, rc;
ata_print_version_once(&pdev->dev, DRV_VERSION);
/*
* Simple resource validation.
*/
if (unlikely(pdev->num_resources != 2)) {
dev_err(&pdev->dev, "invalid number of resources\n");
return -EINVAL;
}
/*
* Get the register base first
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL)
return -EINVAL;
/* allocate host */
mv_platform_data = pdev->dev.platform_data;
n_ports = mv_platform_data->n_ports;
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
if (!host || !hpriv)
return -ENOMEM;
host->private_data = hpriv;
hpriv->n_ports = n_ports;
hpriv->board_idx = chip_soc;
host->iomap = NULL;
hpriv->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
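/*
 * Register offsets in this driver are relative to the chip base,
 * while the platform resource presumably points at the SATAHC0
 * block, hence the downward bias applied below.
 */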
hpriv->base -= SATAHC0_REG_BASE;
#if defined(CONFIG_HAVE_CLK)
hpriv->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(hpriv->clk))
dev_notice(&pdev->dev, "cannot get clkdev\n");
else
clk_enable(hpriv->clk);
#endif
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
if (mv_platform_data->dram != NULL)
mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
rc = mv_create_dma_pools(hpriv, &pdev->dev);
if (rc)
goto err;
/* initialize adapter */
rc = mv_init_host(host);
if (rc)
goto err;
dev_info(&pdev->dev, "slots %u ports %d\n",
(unsigned)MV_MAX_Q_DEPTH, host->n_ports);
return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
IRQF_SHARED, &mv6_sht);
err:
#if defined(CONFIG_HAVE_CLK)
if (!IS_ERR(hpriv->clk)) {
clk_disable(hpriv->clk);
clk_put(hpriv->clk);
}
#endif
return rc;
}
/**
 * mv_platform_remove - unplug a platform interface
* @pdev: platform device
*
* A platform bus SATA device has been unplugged. Perform the needed
* cleanup. Also called on module unload for any active devices.
*/
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ata_host *host = dev_get_drvdata(dev);
#if defined(CONFIG_HAVE_CLK)
struct mv_host_priv *hpriv = host->private_data;
#endif
ata_host_detach(host);
#if defined(CONFIG_HAVE_CLK)
if (!IS_ERR(hpriv->clk)) {
clk_disable(hpriv->clk);
clk_put(hpriv->clk);
}
#endif
return 0;
}
#ifdef CONFIG_PM
static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
if (host)
return ata_host_suspend(host, state);
else
return 0;
}
static int mv_platform_resume(struct platform_device *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
int ret;
if (host) {
struct mv_host_priv *hpriv = host->private_data;
const struct mv_sata_platform_data *mv_platform_data = \
pdev->dev.platform_data;
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
if (mv_platform_data->dram != NULL)
mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
/* initialize adapter */
ret = mv_init_host(host);
if (ret) {
printk(KERN_ERR DRV_NAME ": Error during HW init\n");
return ret;
}
ata_host_resume(host);
}
return 0;
}
#else
#define mv_platform_suspend NULL
#define mv_platform_resume NULL
#endif
static struct platform_driver mv_platform_driver = {
.probe = mv_platform_probe,
.remove = __devexit_p(mv_platform_remove),
.suspend = mv_platform_suspend,
.resume = mv_platform_resume,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
};
#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int mv_pci_device_resume(struct pci_dev *pdev);
#endif
static struct pci_driver mv_pci_driver = {
.name = DRV_NAME,
.id_table = mv_pci_tbl,
.probe = mv_pci_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = mv_pci_device_resume,
#endif
};
/* move to PCI layer or libata core? */
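/*
 * pci_go_64 - configure the PCI DMA masks, preferring 64-bit
 *
 * Try a 64-bit streaming mask first; if it sticks, also attempt a
 * 64-bit consistent mask, falling back to a 32-bit consistent mask.
 * Failing 64-bit entirely, set both masks to 32 bits.
 */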
static int pci_go_64(struct pci_dev *pdev)
{
int rc;
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev,
"64-bit DMA enable failed\n");
return rc;
}
}
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev,
"32-bit consistent DMA enable failed\n");
return rc;
}
}
return rc;
}
/**
* mv_print_info - Dump key info to kernel log for perusal.
* @host: ATA host to print info about
*
* FIXME: complete this.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_print_info(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
struct mv_host_priv *hpriv = host->private_data;
u8 scc;
const char *scc_s, *gen;
/* Use this to determine the HW stepping of the chip so we know
* what errata to work around
*/
pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
if (scc == 0)
scc_s = "SCSI";
else if (scc == 0x01)
scc_s = "RAID";
else
scc_s = "?";
if (IS_GEN_I(hpriv))
gen = "I";
else if (IS_GEN_II(hpriv))
gen = "II";
else if (IS_GEN_IIE(hpriv))
gen = "IIE";
else
gen = "?";
dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
* mv_pci_init_one - handle a positive probe of a PCI Marvell host
* @pdev: PCI device found
* @ent: PCI device ID entry for the matched host
*
* LOCKING:
* Inherited from caller.
*/
static int mv_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned int board_idx = (unsigned int)ent->driver_data;
const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
struct ata_host *host;
struct mv_host_priv *hpriv;
int n_ports, port, rc;
ata_print_version_once(&pdev->dev, DRV_VERSION);
/* allocate host */
n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
if (!host || !hpriv)
return -ENOMEM;
host->private_data = hpriv;
hpriv->n_ports = n_ports;
hpriv->board_idx = board_idx;
/* acquire resources */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
host->iomap = pcim_iomap_table(pdev);
hpriv->base = host->iomap[MV_PRIMARY_BAR];
rc = pci_go_64(pdev);
if (rc)
return rc;
rc = mv_create_dma_pools(hpriv, &pdev->dev);
if (rc)
return rc;
for (port = 0; port < host->n_ports; port++) {
struct ata_port *ap = host->ports[port];
void __iomem *port_mmio = mv_port_base(hpriv->base, port);
unsigned int offset = port_mmio - hpriv->base;
ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
}
/* initialize adapter */
rc = mv_init_host(host);
if (rc)
return rc;
/* Enable message-signaled interrupts (MSI), if requested */
if (msi && pci_enable_msi(pdev) == 0)
hpriv->hp_flags |= MV_HP_FLAG_MSI;
mv_dump_pci_cfg(pdev, 0x68);
mv_print_info(host);
pci_set_master(pdev);
pci_try_set_mwi(pdev);
return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#ifdef CONFIG_PM
static int mv_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
/* initialize adapter */
rc = mv_init_host(host);
if (rc)
return rc;
ata_host_resume(host);
return 0;
}
#endif
#endif
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);
static int __init mv_init(void)
{
int rc = -ENODEV;
#ifdef CONFIG_PCI
rc = pci_register_driver(&mv_pci_driver);
if (rc < 0)
return rc;
#endif
rc = platform_driver_register(&mv_platform_driver);
#ifdef CONFIG_PCI
if (rc < 0)
pci_unregister_driver(&mv_pci_driver);
#endif
return rc;
}
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
pci_unregister_driver(&mv_pci_driver);
#endif
platform_driver_unregister(&mv_platform_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
module_init(mv_init);
module_exit(mv_exit);
| gpl-2.0 |
scanno/android_kernel_asus_me301t | drivers/net/r6040.c | 387 | 33553 | /*
* RDC R6040 Fast Ethernet MAC support
*
* Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw>
* Copyright (C) 2007
* Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
* Florian Fainelli <florian@openwrt.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the
* Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/uaccess.h>
#include <linux/phy.h>
#include <asm/processor.h>
#define DRV_NAME "r6040"
#define DRV_VERSION "0.27"
#define DRV_RELDATE "23Feb2011"
/* PHY CHIP Address */
#define PHY1_ADDR 1 /* For MAC1 */
#define PHY2_ADDR 3 /* For MAC2 */
#define PHY_MODE 0x3100 /* PHY CHIP Register 0 */
#define PHY_CAP 0x01E1 /* PHY CHIP Register 4 */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (6000 * HZ / 1000)
/* RDC MAC I/O Size */
#define R6040_IO_SIZE 256
/* MAX RDC MAC */
#define MAX_MAC 2
/* MAC registers */
#define MCR0 0x00 /* Control register 0 */
#define MCR0_PROMISC 0x0020 /* Promiscuous mode */
#define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */
#define MCR1 0x04 /* Control register 1 */
#define MAC_RST 0x0001 /* Reset the MAC */
#define MBCR 0x08 /* Bus control */
#define MT_ICR 0x0C /* TX interrupt control */
#define MR_ICR 0x10 /* RX interrupt control */
#define MTPR 0x14 /* TX poll command register */
#define MR_BSR 0x18 /* RX buffer size */
#define MR_DCR 0x1A /* RX descriptor control */
#define MLSR 0x1C /* Last status */
#define MMDIO 0x20 /* MDIO control register */
#define MDIO_WRITE 0x4000 /* MDIO write */
#define MDIO_READ 0x2000 /* MDIO read */
#define MMRD 0x24 /* MDIO read data register */
#define MMWD 0x28 /* MDIO write data register */
#define MTD_SA0 0x2C /* TX descriptor start address 0 */
#define MTD_SA1 0x30 /* TX descriptor start address 1 */
#define MRD_SA0 0x34 /* RX descriptor start address 0 */
#define MRD_SA1 0x38 /* RX descriptor start address 1 */
#define MISR 0x3C /* Status register */
#define MIER 0x40 /* INT enable register */
#define MSK_INT 0x0000 /* Mask off interrupts */
#define RX_FINISH 0x0001 /* RX finished */
#define RX_NO_DESC 0x0002 /* No RX descriptor available */
#define RX_FIFO_FULL 0x0004 /* RX FIFO full */
#define RX_EARLY 0x0008 /* RX early */
#define TX_FINISH 0x0010 /* TX finished */
#define TX_EARLY 0x0080 /* TX early */
#define EVENT_OVRFL 0x0100 /* Event counter overflow */
#define LINK_CHANGED 0x0200 /* PHY link changed */
#define ME_CISR 0x44 /* Event counter INT status */
#define ME_CIER 0x48 /* Event counter INT enable */
#define MR_CNT 0x50 /* Successfully received packet counter */
#define ME_CNT0 0x52 /* Event counter 0 */
#define ME_CNT1 0x54 /* Event counter 1 */
#define ME_CNT2 0x56 /* Event counter 2 */
#define ME_CNT3 0x58 /* Event counter 3 */
#define MT_CNT 0x5A /* Successfully transmit packet counter */
#define ME_CNT4 0x5C /* Event counter 4 */
#define MP_CNT 0x5E /* Pause frame counter register */
#define MAR0 0x60 /* Hash table 0 */
#define MAR1 0x62 /* Hash table 1 */
#define MAR2 0x64 /* Hash table 2 */
#define MAR3 0x66 /* Hash table 3 */
#define MID_0L 0x68 /* Multicast address MID0 Low */
#define MID_0M 0x6A /* Multicast address MID0 Medium */
#define MID_0H 0x6C /* Multicast address MID0 High */
#define MID_1L 0x70 /* MID1 Low */
#define MID_1M 0x72 /* MID1 Medium */
#define MID_1H 0x74 /* MID1 High */
#define MID_2L 0x78 /* MID2 Low */
#define MID_2M 0x7A /* MID2 Medium */
#define MID_2H 0x7C /* MID2 High */
#define MID_3L 0x80 /* MID3 Low */
#define MID_3M 0x82 /* MID3 Medium */
#define MID_3H 0x84 /* MID3 High */
#define PHY_CC 0x88 /* PHY status change configuration register */
#define PHY_ST 0x8A /* PHY status register */
#define MAC_SM 0xAC /* MAC status machine */
#define MAC_ID 0xBE /* Identifier register */
#define TX_DCNT 0x80 /* TX descriptor count */
#define RX_DCNT 0x80 /* RX descriptor count */
#define MAX_BUF_SIZE 0x600
#define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor))
#define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor))
#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */
#define MCAST_MAX 3 /* Max number multicast addresses to filter */
/* Descriptor status */
#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */
#define DSC_RX_OK 0x4000 /* RX was successful */
#define DSC_RX_ERR 0x0800 /* RX PHY error */
#define DSC_RX_ERR_DRI 0x0400 /* RX dribble packet */
#define DSC_RX_ERR_BUF 0x0200 /* RX length exceeds buffer size */
#define DSC_RX_ERR_LONG 0x0100 /* RX length > maximum packet length */
#define DSC_RX_ERR_RUNT 0x0080 /* RX packet length < 64 byte */
#define DSC_RX_ERR_CRC 0x0040 /* RX CRC error */
#define DSC_RX_BCAST 0x0020 /* RX broadcast (no error) */
#define DSC_RX_MCAST 0x0010 /* RX multicast (no error) */
#define DSC_RX_MCH_HIT 0x0008 /* RX multicast hit in hash table (no error) */
#define DSC_RX_MIDH_HIT 0x0004 /* RX MID table hit (no error) */
#define DSC_RX_IDX_MID_MASK 3 /* RX mask for the index of matched MIDx */
/* PHY settings */
#define ICPLUS_PHY_ID 0x0243
MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>,"
"Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>,"
"Florian Fainelli <florian@openwrt.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver");
MODULE_VERSION(DRV_VERSION " " DRV_RELDATE);
/* RX and TX interrupts that we handle */
#define RX_INTS (RX_FIFO_FULL | RX_NO_DESC | RX_FINISH)
#define TX_INTS (TX_FINISH)
#define INT_MASK (RX_INTS | TX_INTS)
struct r6040_descriptor {
u16 status, len; /* 0-3 */
__le32 buf; /* 4-7 */
__le32 ndesc; /* 8-B */
u32 rev1; /* C-F */
char *vbufp; /* 10-13 */
struct r6040_descriptor *vndescp; /* 14-17 */
struct sk_buff *skb_ptr; /* 18-1B */
u32 rev2; /* 1C-1F */
} __attribute__((aligned(32)));
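/* Note: the first 16 bytes (status/len/buf/ndesc) are what the MAC
* reads and writes via DMA; vbufp, vndescp and skb_ptr are driver-side
* bookkeeping only, per the offset comments above. */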
struct r6040_private {
spinlock_t lock; /* driver lock */
struct pci_dev *pdev;
struct r6040_descriptor *rx_insert_ptr;
struct r6040_descriptor *rx_remove_ptr;
struct r6040_descriptor *tx_insert_ptr;
struct r6040_descriptor *tx_remove_ptr;
struct r6040_descriptor *rx_ring;
struct r6040_descriptor *tx_ring;
dma_addr_t rx_ring_dma;
dma_addr_t tx_ring_dma;
u16 tx_free_desc, phy_addr;
u16 mcr0, mcr1;
struct net_device *dev;
struct mii_bus *mii_bus;
struct napi_struct napi;
void __iomem *base;
struct phy_device *phydev;
int old_link;
int old_duplex;
};
static char version[] __devinitdata = DRV_NAME
": RDC R6040 NAPI net driver, "
"version " DRV_VERSION " (" DRV_RELDATE ")";
static int phy_table[] = { PHY1_ADDR, PHY2_ADDR };
/* Read a word of data from the PHY chip */
static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
{
int limit = 2048;
u16 cmd;
iowrite16(MDIO_READ + reg + (phy_addr << 8), ioaddr + MMDIO);
/* Wait for the read bit to be cleared */
while (limit--) {
cmd = ioread16(ioaddr + MMDIO);
if (!(cmd & MDIO_READ))
break;
}
return ioread16(ioaddr + MMRD);
}
/* Write a word of data to the PHY chip */
static void r6040_phy_write(void __iomem *ioaddr,
int phy_addr, int reg, u16 val)
{
int limit = 2048;
u16 cmd;
iowrite16(val, ioaddr + MMWD);
/* Write the command to the MDIO bus */
iowrite16(MDIO_WRITE + reg + (phy_addr << 8), ioaddr + MMDIO);
/* Wait for the write bit to be cleared */
while (limit--) {
cmd = ioread16(ioaddr + MMDIO);
if (!(cmd & MDIO_WRITE))
break;
}
}
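/* Illustrative sketch (not compiled in): the MMDIO command word packs
* the PHY address into bits 8-12 and the register number into the low
* bits, with MDIO_READ/MDIO_WRITE acting as busy bits that the MAC
* clears when the transaction completes. Reading the two PHY identifier
* registers would look like the hypothetical helper below, where
* "ioaddr" is the mapped MAC base. */
#if 0
static void r6040_phy_id_example(void __iomem *ioaddr)
{
u16 id1 = r6040_phy_read(ioaddr, PHY1_ADDR, MII_PHYSID1);
u16 id2 = r6040_phy_read(ioaddr, PHY1_ADDR, MII_PHYSID2);
pr_info("PHY id %04x:%04x\n", id1, id2);
}
#endif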
static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg)
{
struct net_device *dev = bus->priv;
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
return r6040_phy_read(ioaddr, phy_addr, reg);
}
static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
int reg, u16 value)
{
struct net_device *dev = bus->priv;
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
r6040_phy_write(ioaddr, phy_addr, reg, value);
return 0;
}
static int r6040_mdiobus_reset(struct mii_bus *bus)
{
return 0;
}
static void r6040_free_txbufs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
int i;
for (i = 0; i < TX_DCNT; i++) {
if (lp->tx_insert_ptr->skb_ptr) {
pci_unmap_single(lp->pdev,
le32_to_cpu(lp->tx_insert_ptr->buf),
MAX_BUF_SIZE, PCI_DMA_TODEVICE);
dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
lp->tx_insert_ptr->skb_ptr = NULL;
}
lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp;
}
}
static void r6040_free_rxbufs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
int i;
for (i = 0; i < RX_DCNT; i++) {
if (lp->rx_insert_ptr->skb_ptr) {
pci_unmap_single(lp->pdev,
le32_to_cpu(lp->rx_insert_ptr->buf),
MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
dev_kfree_skb(lp->rx_insert_ptr->skb_ptr);
lp->rx_insert_ptr->skb_ptr = NULL;
}
lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp;
}
}
static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
dma_addr_t desc_dma, int size)
{
struct r6040_descriptor *desc = desc_ring;
dma_addr_t mapping = desc_dma;
while (size-- > 0) {
mapping += sizeof(*desc);
desc->ndesc = cpu_to_le32(mapping);
desc->vndescp = desc + 1;
desc++;
}
desc--;
desc->ndesc = cpu_to_le32(desc_dma);
desc->vndescp = desc_ring;
}
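/* After r6040_init_ring_desc() the descriptors form a circular list in
* both address spaces: desc[i].ndesc holds the DMA address of desc[i+1]
* for the MAC to follow, desc[i].vndescp holds its virtual address for
* the driver, and the last descriptor points back at the first. */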
static void r6040_init_txbufs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
lp->tx_free_desc = TX_DCNT;
lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
}
static int r6040_alloc_rxbufs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
struct r6040_descriptor *desc;
struct sk_buff *skb;
int rc;
lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);
/* Allocate skbs for the rx descriptors */
desc = lp->rx_ring;
do {
skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
if (!skb) {
netdev_err(dev, "failed to alloc skb for rx\n");
rc = -ENOMEM;
goto err_exit;
}
desc->skb_ptr = skb;
desc->buf = cpu_to_le32(pci_map_single(lp->pdev,
desc->skb_ptr->data,
MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
desc->status = DSC_OWNER_MAC;
desc = desc->vndescp;
} while (desc != lp->rx_ring);
return 0;
err_exit:
/* Deallocate all previously allocated skbs */
r6040_free_rxbufs(dev);
return rc;
}
static void r6040_init_mac_regs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
int limit = 2048;
u16 cmd;
/* Mask Off Interrupt */
iowrite16(MSK_INT, ioaddr + MIER);
/* Reset RDC MAC */
iowrite16(MAC_RST, ioaddr + MCR1);
while (limit--) {
cmd = ioread16(ioaddr + MCR1);
if (cmd & MAC_RST)
break;
}
/* Reset internal state machine */
iowrite16(2, ioaddr + MAC_SM);
iowrite16(0, ioaddr + MAC_SM);
mdelay(5);
/* MAC Bus Control Register */
iowrite16(MBCR_DEFAULT, ioaddr + MBCR);
/* Buffer Size Register */
iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR);
/* Write TX ring start address */
iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);
/* Write RX ring start address */
iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0);
iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1);
/* Set interrupt waiting time and packet numbers */
iowrite16(0, ioaddr + MT_ICR);
iowrite16(0, ioaddr + MR_ICR);
/* Enable interrupts */
iowrite16(INT_MASK, ioaddr + MIER);
/* Enable TX and RX */
iowrite16(lp->mcr0 | 0x0002, ioaddr);
/* Let TX poll the descriptors;
* we may have been called by r6040_tx_timeout which has left
* some unsent tx buffers */
iowrite16(0x01, ioaddr + MTPR);
}
static void r6040_tx_timeout(struct net_device *dev)
{
struct r6040_private *priv = netdev_priv(dev);
void __iomem *ioaddr = priv->base;
netdev_warn(dev, "transmit timed out, int enable %4.4x "
"status %4.4x\n",
ioread16(ioaddr + MIER),
ioread16(ioaddr + MISR));
dev->stats.tx_errors++;
/* Reset MAC and re-init all registers */
r6040_init_mac_regs(dev);
}
static struct net_device_stats *r6040_get_stats(struct net_device *dev)
{
struct r6040_private *priv = netdev_priv(dev);
void __iomem *ioaddr = priv->base;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
dev->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1);
dev->stats.multicast += ioread8(ioaddr + ME_CNT0);
spin_unlock_irqrestore(&priv->lock, flags);
return &dev->stats;
}
/* Stop RDC MAC and Free the allocated resource */
static void r6040_down(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
int limit = 2048;
u16 *adrp;
u16 cmd;
/* Stop MAC */
iowrite16(MSK_INT, ioaddr + MIER); /* Mask Off Interrupt */
iowrite16(MAC_RST, ioaddr + MCR1); /* Reset RDC MAC */
while (limit--) {
cmd = ioread16(ioaddr + MCR1);
if (cmd & MAC_RST)
break;
}
/* Restore MAC Address to MIDx */
adrp = (u16 *) dev->dev_addr;
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
}
static int r6040_close(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
struct pci_dev *pdev = lp->pdev;
spin_lock_irq(&lp->lock);
napi_disable(&lp->napi);
netif_stop_queue(dev);
r6040_down(dev);
free_irq(dev->irq, dev);
/* Free RX buffer */
r6040_free_rxbufs(dev);
/* Free TX buffer */
r6040_free_txbufs(dev);
spin_unlock_irq(&lp->lock);
/* Free Descriptor memory */
if (lp->rx_ring) {
pci_free_consistent(pdev,
RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
lp->rx_ring = NULL;
}
if (lp->tx_ring) {
pci_free_consistent(pdev,
TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
lp->tx_ring = NULL;
}
return 0;
}
static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct r6040_private *lp = netdev_priv(dev);
if (!lp->phydev)
return -EINVAL;
return phy_mii_ioctl(lp->phydev, rq, cmd);
}
static int r6040_rx(struct net_device *dev, int limit)
{
struct r6040_private *priv = netdev_priv(dev);
struct r6040_descriptor *descptr = priv->rx_remove_ptr;
struct sk_buff *skb_ptr, *new_skb;
int count = 0;
u16 err;
/* Limit not reached and the descriptor belongs to the CPU */
while (count < limit && !(descptr->status & DSC_OWNER_MAC)) {
/* Read the descriptor status */
err = descptr->status;
/* Global error status set */
if (err & DSC_RX_ERR) {
/* RX dribble */
if (err & DSC_RX_ERR_DRI)
dev->stats.rx_frame_errors++;
/* Buffer length exceeded */
if (err & DSC_RX_ERR_BUF)
dev->stats.rx_length_errors++;
/* Packet too long */
if (err & DSC_RX_ERR_LONG)
dev->stats.rx_length_errors++;
/* Packet < 64 bytes */
if (err & DSC_RX_ERR_RUNT)
dev->stats.rx_length_errors++;
/* CRC error */
if (err & DSC_RX_ERR_CRC) {
spin_lock(&priv->lock);
dev->stats.rx_crc_errors++;
spin_unlock(&priv->lock);
}
goto next_descr;
}
/* Packet successfully received */
new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
if (!new_skb) {
dev->stats.rx_dropped++;
goto next_descr;
}
skb_ptr = descptr->skb_ptr;
skb_ptr->dev = priv->dev;
/* Do not count the CRC */
skb_put(skb_ptr, descptr->len - 4);
pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);
/* Send to upper layer */
netif_receive_skb(skb_ptr);
dev->stats.rx_packets++;
dev->stats.rx_bytes += descptr->len - 4;
/* put new skb into descriptor */
descptr->skb_ptr = new_skb;
descptr->buf = cpu_to_le32(pci_map_single(priv->pdev,
descptr->skb_ptr->data,
MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
next_descr:
/* put the descriptor back to the MAC */
descptr->status = DSC_OWNER_MAC;
descptr = descptr->vndescp;
count++;
}
priv->rx_remove_ptr = descptr;
return count;
}
static void r6040_tx(struct net_device *dev)
{
struct r6040_private *priv = netdev_priv(dev);
struct r6040_descriptor *descptr;
void __iomem *ioaddr = priv->base;
struct sk_buff *skb_ptr;
u16 err;
spin_lock(&priv->lock);
descptr = priv->tx_remove_ptr;
while (priv->tx_free_desc < TX_DCNT) {
/* Check for errors */
err = ioread16(ioaddr + MLSR);
if (err & 0x0200)
dev->stats.rx_fifo_errors++;
if (err & (0x2000 | 0x4000))
dev->stats.tx_carrier_errors++;
if (descptr->status & DSC_OWNER_MAC)
break; /* Not complete */
skb_ptr = descptr->skb_ptr;
pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
skb_ptr->len, PCI_DMA_TODEVICE);
/* Free buffer */
dev_kfree_skb_irq(skb_ptr);
descptr->skb_ptr = NULL;
/* To next descriptor */
descptr = descptr->vndescp;
priv->tx_free_desc++;
}
priv->tx_remove_ptr = descptr;
if (priv->tx_free_desc)
netif_wake_queue(dev);
spin_unlock(&priv->lock);
}
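/* TX reclaim walks forward from tx_remove_ptr, freeing each completed
* skb until it finds a descriptor still owned by the MAC, then wakes
* the queue if at least one descriptor was reclaimed. */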
static int r6040_poll(struct napi_struct *napi, int budget)
{
struct r6040_private *priv =
container_of(napi, struct r6040_private, napi);
struct net_device *dev = priv->dev;
void __iomem *ioaddr = priv->base;
int work_done;
work_done = r6040_rx(dev, budget);
if (work_done < budget) {
napi_complete(napi);
/* Enable RX interrupt */
iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER);
}
return work_done;
}
/* The RDC interrupt handler. */
static irqreturn_t r6040_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
u16 misr, status;
/* Save MIER */
misr = ioread16(ioaddr + MIER);
/* Mask off RDC MAC interrupt */
iowrite16(MSK_INT, ioaddr + MIER);
/* Read MISR status and clear */
status = ioread16(ioaddr + MISR);
if (status == 0x0000 || status == 0xffff) {
/* Restore RDC MAC interrupt */
iowrite16(misr, ioaddr + MIER);
return IRQ_NONE;
}
/* RX interrupt request */
if (status & RX_INTS) {
if (status & RX_NO_DESC) {
/* RX descriptor unavailable */
dev->stats.rx_dropped++;
dev->stats.rx_missed_errors++;
}
if (status & RX_FIFO_FULL)
dev->stats.rx_fifo_errors++;
if (likely(napi_schedule_prep(&lp->napi))) {
/* Mask off RX interrupt */
misr &= ~RX_INTS;
__napi_schedule(&lp->napi);
}
}
/* TX interrupt request */
if (status & TX_INTS)
r6040_tx(dev);
/* Restore RDC MAC interrupt */
iowrite16(misr, ioaddr + MIER);
return IRQ_HANDLED;
}
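/* Interrupt handling pattern: MIER is saved and all sources are masked
* on entry. If RX work is pending, the RX bits are dropped from the
* saved mask before it is restored, so RX interrupts stay disabled
* until r6040_poll() re-enables them after napi_complete(). A MISR
* value of 0x0000 or 0xffff means the interrupt was not ours (shared
* line or hardware gone). */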
#ifdef CONFIG_NET_POLL_CONTROLLER
static void r6040_poll_controller(struct net_device *dev)
{
disable_irq(dev->irq);
r6040_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
/* Init RDC MAC */
static int r6040_up(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
int ret;
/* Initialise and alloc RX/TX buffers */
r6040_init_txbufs(dev);
ret = r6040_alloc_rxbufs(dev);
if (ret)
return ret;
/* improve performance (by RDC guys) */
r6040_phy_write(ioaddr, 30, 17,
(r6040_phy_read(ioaddr, 30, 17) | 0x4000));
r6040_phy_write(ioaddr, 30, 17,
~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
r6040_phy_write(ioaddr, 0, 19, 0x0000);
r6040_phy_write(ioaddr, 0, 30, 0x01F0);
/* Initialize all MAC registers */
r6040_init_mac_regs(dev);
return 0;
}
/* Read/set MAC address routines */
static void r6040_mac_address(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
u16 *adrp;
/* MAC operation register */
iowrite16(MAC_RST, ioaddr + MCR1); /* Reset MAC */
iowrite16(2, ioaddr + MAC_SM); /* Reset internal state machine */
iowrite16(0, ioaddr + MAC_SM);
mdelay(5);
/* Restore MAC Address */
adrp = (u16 *) dev->dev_addr;
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
/* Store MAC Address in perm_addr */
memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
}
static int r6040_open(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
int ret;
/* Request IRQ and Register interrupt handler */
ret = request_irq(dev->irq, r6040_interrupt,
IRQF_SHARED, dev->name, dev);
if (ret)
goto out;
/* Set MAC address */
r6040_mac_address(dev);
/* Allocate Descriptor memory */
lp->rx_ring =
pci_alloc_consistent(lp->pdev, RX_DESC_SIZE, &lp->rx_ring_dma);
if (!lp->rx_ring) {
ret = -ENOMEM;
goto err_free_irq;
}
lp->tx_ring =
pci_alloc_consistent(lp->pdev, TX_DESC_SIZE, &lp->tx_ring_dma);
if (!lp->tx_ring) {
ret = -ENOMEM;
goto err_free_rx_ring;
}
ret = r6040_up(dev);
if (ret)
goto err_free_tx_ring;
napi_enable(&lp->napi);
netif_start_queue(dev);
return 0;
err_free_tx_ring:
pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring,
lp->tx_ring_dma);
err_free_rx_ring:
pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring,
lp->rx_ring_dma);
err_free_irq:
free_irq(dev->irq, dev);
out:
return ret;
}
static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
struct r6040_descriptor *descptr;
void __iomem *ioaddr = lp->base;
unsigned long flags;
/* Critical Section */
spin_lock_irqsave(&lp->lock, flags);
/* TX resource check */
if (!lp->tx_free_desc) {
spin_unlock_irqrestore(&lp->lock, flags);
netif_stop_queue(dev);
netdev_err(dev, ": no tx descriptor\n");
return NETDEV_TX_BUSY;
}
/* Statistic Counter */
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
/* Set TX descriptor & Transmit it */
lp->tx_free_desc--;
descptr = lp->tx_insert_ptr;
/* Pad short frames to the minimum Ethernet frame length.
* (The original code compared against MISR, the 0x3C status register
* offset, which only works because 0x3C happens to equal 60.) */
if (skb->len < ETH_ZLEN)
descptr->len = ETH_ZLEN;
else
descptr->len = skb->len;
descptr->skb_ptr = skb;
descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
skb->data, skb->len, PCI_DMA_TODEVICE));
descptr->status = DSC_OWNER_MAC;
skb_tx_timestamp(skb);
/* Trigger the MAC to check the TX descriptor */
iowrite16(0x01, ioaddr + MTPR);
lp->tx_insert_ptr = descptr->vndescp;
/* If no tx resource, stop */
if (!lp->tx_free_desc)
netif_stop_queue(dev);
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
}
static void r6040_multicast_list(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
unsigned long flags;
struct netdev_hw_addr *ha;
int i;
u16 *adrp;
u16 hash_table[4] = { 0 };
spin_lock_irqsave(&lp->lock, flags);
/* Keep our MAC Address */
adrp = (u16 *)dev->dev_addr;
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
/* Clear AMCP & PROM bits */
lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN);
/* Promiscuous mode */
if (dev->flags & IFF_PROMISC)
lp->mcr0 |= MCR0_PROMISC;
/* Enable multicast hash table function to
* receive all multicast packets. */
else if (dev->flags & IFF_ALLMULTI) {
lp->mcr0 |= MCR0_HASH_EN;
for (i = 0; i < MCAST_MAX ; i++) {
iowrite16(0, ioaddr + MID_1L + 8 * i);
iowrite16(0, ioaddr + MID_1M + 8 * i);
iowrite16(0, ioaddr + MID_1H + 8 * i);
}
for (i = 0; i < 4; i++)
hash_table[i] = 0xffff;
}
/* Use internal multicast address registers if the number of
* multicast addresses is not greater than MCAST_MAX. */
else if (netdev_mc_count(dev) <= MCAST_MAX) {
i = 0;
netdev_for_each_mc_addr(ha, dev) {
u16 *adrp = (u16 *) ha->addr;
iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
i++;
}
while (i < MCAST_MAX) {
iowrite16(0, ioaddr + MID_1L + 8 * i);
iowrite16(0, ioaddr + MID_1M + 8 * i);
iowrite16(0, ioaddr + MID_1H + 8 * i);
i++;
}
}
/* Otherwise, Enable multicast hash table function. */
else {
u32 crc;
lp->mcr0 |= MCR0_HASH_EN;
for (i = 0; i < MCAST_MAX ; i++) {
iowrite16(0, ioaddr + MID_1L + 8 * i);
iowrite16(0, ioaddr + MID_1M + 8 * i);
iowrite16(0, ioaddr + MID_1H + 8 * i);
}
/* Build multicast hash table */
netdev_for_each_mc_addr(ha, dev) {
u8 *addrs = ha->addr;
crc = ether_crc(ETH_ALEN, addrs);
crc >>= 26;
hash_table[crc >> 4] |= 1 << (crc & 0xf);
}
}
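/* The hash index is the top 6 bits of the Ethernet CRC: bits 5-4
* select one of MAR0-MAR3 and bits 3-0 select the bit within that
* 16-bit register. For example, crc >> 26 == 0x2b (binary 10 1011)
* sets bit 11 of MAR2. */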
iowrite16(lp->mcr0, ioaddr + MCR0);
/* Fill the MAC hash tables with their values */
if (lp->mcr0 & MCR0_HASH_EN) {
iowrite16(hash_table[0], ioaddr + MAR0);
iowrite16(hash_table[1], ioaddr + MAR1);
iowrite16(hash_table[2], ioaddr + MAR2);
iowrite16(hash_table[3], ioaddr + MAR3);
}
spin_unlock_irqrestore(&lp->lock, flags);
}
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct r6040_private *rp = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct r6040_private *rp = netdev_priv(dev);
return phy_ethtool_gset(rp->phydev, cmd);
}
static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct r6040_private *rp = netdev_priv(dev);
return phy_ethtool_sset(rp->phydev, cmd);
}
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
.get_settings = netdev_get_settings,
.set_settings = netdev_set_settings,
.get_link = ethtool_op_get_link,
};
static const struct net_device_ops r6040_netdev_ops = {
.ndo_open = r6040_open,
.ndo_stop = r6040_close,
.ndo_start_xmit = r6040_start_xmit,
.ndo_get_stats = r6040_get_stats,
.ndo_set_multicast_list = r6040_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = r6040_ioctl,
.ndo_tx_timeout = r6040_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = r6040_poll_controller,
#endif
};
static void r6040_adjust_link(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
struct phy_device *phydev = lp->phydev;
int status_changed = 0;
void __iomem *ioaddr = lp->base;
BUG_ON(!phydev);
if (lp->old_link != phydev->link) {
status_changed = 1;
lp->old_link = phydev->link;
}
/* reflect duplex change */
if (phydev->link && (lp->old_duplex != phydev->duplex)) {
lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? 0x8000 : 0);
iowrite16(lp->mcr0, ioaddr);
status_changed = 1;
lp->old_duplex = phydev->duplex;
}
if (status_changed) {
pr_info("%s: link %s", dev->name, phydev->link ?
"UP" : "DOWN");
if (phydev->link)
pr_cont(" - %d/%s", phydev->speed,
DUPLEX_FULL == phydev->duplex ? "full" : "half");
pr_cont("\n");
}
}
static int r6040_mii_probe(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
struct phy_device *phydev = NULL;
phydev = phy_find_first(lp->mii_bus);
if (!phydev) {
dev_err(&lp->pdev->dev, "no PHY found\n");
return -ENODEV;
}
phydev = phy_connect(dev, dev_name(&phydev->dev), &r6040_adjust_link,
0, PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
dev_err(&lp->pdev->dev, "could not attach to PHY\n");
return PTR_ERR(phydev);
}
/* mask with MAC supported features */
phydev->supported &= (SUPPORTED_10baseT_Half
| SUPPORTED_10baseT_Full
| SUPPORTED_100baseT_Half
| SUPPORTED_100baseT_Full
| SUPPORTED_Autoneg
| SUPPORTED_MII
| SUPPORTED_TP);
phydev->advertising = phydev->supported;
lp->phydev = phydev;
lp->old_link = 0;
lp->old_duplex = -1;
dev_info(&lp->pdev->dev, "attached PHY driver [%s] "
"(mii_bus:phy_addr=%s)\n",
phydev->drv->name, dev_name(&phydev->dev));
return 0;
}
static int __devinit r6040_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct net_device *dev;
struct r6040_private *lp;
void __iomem *ioaddr;
int err, io_size = R6040_IO_SIZE;
static int card_idx = -1;
int bar = 0;
u16 *adrp;
int i;
pr_info("%s\n", version);
err = pci_enable_device(pdev);
if (err)
goto err_out;
/* this should always be supported */
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "32-bit PCI DMA addresses"
"not supported by the card\n");
goto err_out;
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "32-bit PCI DMA addresses"
"not supported by the card\n");
goto err_out;
}
/* IO Size check */
if (pci_resource_len(pdev, bar) < io_size) {
dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
err = -EIO;
goto err_out;
}
pci_set_master(pdev);
dev = alloc_etherdev(sizeof(struct r6040_private));
if (!dev) {
dev_err(&pdev->dev, "Failed to allocate etherdev\n");
err = -ENOMEM;
goto err_out;
}
SET_NETDEV_DEV(dev, &pdev->dev);
lp = netdev_priv(dev);
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
dev_err(&pdev->dev, "Failed to request PCI regions\n");
goto err_out_free_dev;
}
ioaddr = pci_iomap(pdev, bar, io_size);
if (!ioaddr) {
dev_err(&pdev->dev, "ioremap failed for device\n");
err = -EIO;
goto err_out_free_res;
}
/* If PHY status change register is still set to zero it means the
* bootloader didn't initialize it */
if (ioread16(ioaddr + PHY_CC) == 0)
iowrite16(0x9f07, ioaddr + PHY_CC);
/* Init system & device */
lp->base = ioaddr;
dev->irq = pdev->irq;
spin_lock_init(&lp->lock);
pci_set_drvdata(pdev, dev);
/* Set MAC address */
card_idx++;
adrp = (u16 *)dev->dev_addr;
adrp[0] = ioread16(ioaddr + MID_0L);
adrp[1] = ioread16(ioaddr + MID_0M);
adrp[2] = ioread16(ioaddr + MID_0H);
/* Some bootloader/BIOSes do not initialize
* MAC address, warn about that */
if (!(adrp[0] || adrp[1] || adrp[2])) {
netdev_warn(dev, "MAC address not initialized, "
"generating random\n");
random_ether_addr(dev->dev_addr);
}
/* Link new device into r6040_root_dev */
lp->pdev = pdev;
lp->dev = dev;
/* Init RDC private data */
lp->mcr0 = 0x1002;
lp->phy_addr = phy_table[card_idx];
/* The RDC-specific entries in the device structure. */
dev->netdev_ops = &r6040_netdev_ops;
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
netif_napi_add(dev, &lp->napi, r6040_poll, 64);
lp->mii_bus = mdiobus_alloc();
if (!lp->mii_bus) {
dev_err(&pdev->dev, "mdiobus_alloc() failed\n");
err = -ENOMEM;
goto err_out_unmap;
}
lp->mii_bus->priv = dev;
lp->mii_bus->read = r6040_mdiobus_read;
lp->mii_bus->write = r6040_mdiobus_write;
lp->mii_bus->reset = r6040_mdiobus_reset;
lp->mii_bus->name = "r6040_eth_mii";
snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", card_idx);
lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
if (!lp->mii_bus->irq) {
dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
err = -ENOMEM;
goto err_out_mdio;
}
for (i = 0; i < PHY_MAX_ADDR; i++)
lp->mii_bus->irq[i] = PHY_POLL;
err = mdiobus_register(lp->mii_bus);
if (err) {
dev_err(&pdev->dev, "failed to register MII bus\n");
goto err_out_mdio_irq;
}
err = r6040_mii_probe(dev);
if (err) {
dev_err(&pdev->dev, "failed to probe MII bus\n");
goto err_out_mdio_unregister;
}
/* Register net device. After this call, dev->name is assigned */
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Failed to register net device\n");
goto err_out_mdio_unregister;
}
return 0;
err_out_mdio_unregister:
mdiobus_unregister(lp->mii_bus);
err_out_mdio_irq:
kfree(lp->mii_bus->irq);
err_out_mdio:
mdiobus_free(lp->mii_bus);
err_out_unmap:
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
err_out_free_dev:
free_netdev(dev);
err_out:
return err;
}
static void __devexit r6040_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct r6040_private *lp = netdev_priv(dev);
unregister_netdev(dev);
mdiobus_unregister(lp->mii_bus);
kfree(lp->mii_bus->irq);
mdiobus_free(lp->mii_bus);
pci_release_regions(pdev);
free_netdev(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(r6040_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, r6040_pci_tbl);
static struct pci_driver r6040_driver = {
.name = DRV_NAME,
.id_table = r6040_pci_tbl,
.probe = r6040_init_one,
.remove = __devexit_p(r6040_remove_one),
};
static int __init r6040_init(void)
{
return pci_register_driver(&r6040_driver);
}
static void __exit r6040_cleanup(void)
{
pci_unregister_driver(&r6040_driver);
}
module_init(r6040_init);
module_exit(r6040_cleanup);
| gpl-2.0 |
keeeener/nicki | kernel/drivers/staging/prima/CORE/BAP/src/btampFsm.c | 387 | 104342 | /*
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*===========================================================================
b t a m p F s m . C
OVERVIEW:
This software unit holds the implementation of the Finite State Machine that
controls the operation of each individual AMP Physical link.
(Currently, this is limited to ONE link.)
The btampFsm() routine provided by this module is called by the rest of
the BT-AMP PAL module whenever a control plane operation occurs that requires a
major state transition.
DEPENDENCIES:
Are listed for each API below.
Copyright (c) 2008 QUALCOMM Incorporated.
All Rights Reserved.
Qualcomm Confidential and Proprietary
===========================================================================*/
/*===========================================================================
EDIT HISTORY FOR FILE
This section contains comments describing changes made to the module.
Notice that changes are listed in reverse chronological order.
$Header: /prj/qct/asw/engbuilds/scl/users02/jzmuda/gb-bluez/vendor/qcom/proprietary/wlan/libra/CORE/BAP/src/btampFsm.c,v 1.11 2011/03/30 21:52:10 jzmuda Exp jzmuda $
when who what, where, why
---------- --- --------------------------------------------------------
2008-10-16 jez Created module
===========================================================================*/
/* This file is generated from btampFsm.cdd - do not edit manually*/
/* Generated on: Thu Oct 16 15:40:39 PDT 2008 / version 1.2 Beta 1 */
/*----------------------------------------------------------------------------
* Include Files
* -------------------------------------------------------------------------*/
#include "fsmDefs.h"
//#include "btampFsm.h"
#include "bapInternal.h"
#include "btampFsm_ext.h"
// Pick up the BTAMP Timer API definitions
#include "bapApiTimer.h"
// Pick up the BTAMP RSN definitions
#include "bapRsn8021xFsm.h"
#include "bapRsn8021xAuthFsm.h"
// Pick up the SME API definitions
#include "sme_Api.h"
// Pick up the PMC API definitions
#include "pmcApi.h"
// Pick up the BTAMP API definitions for interfacing to external subsystems
#include "bapApiExt.h"
#include "wlan_nlink_common.h"
#include "wlan_btc_svc.h"
// Pick up the DOT11 Frames compiler
// I just need this one "opaque" type definition in order to use the "frames" code
typedef struct sAniSirGlobal *tpAniSirGlobal;
#include "dot11f.h"
#if 0
/*
* Event-related Defines.
* Ultimately, these events will be values from an enumeration
* that are set by some of the following events.
*/
#define eWLAN_BAP_MAC_START_BSS_SUCCESS /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_STARTED */
#define eWLAN_BAP_MAC_START_FAILS /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_FAILURE or eCSR_ROAM_RESULT_NOT_ASSOCIATED */
#define eWLAN_BAP_MAC_SCAN_COMPLETE /* bapScanCompleteCallback */
#define eWLAN_BAP_CHANNEL_NOT_SELECTED /* No existing Infra assoc - e.g., use HAL to access the STA LIST and find nothing */
#define eWLAN_BAP_MAC_CONNECT_COMPLETED /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_ASSOCIATED */
#define eWLAN_BAP_MAC_CONNECT_FAILED /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_FAILURE or eCSR_ROAM_RESULT_NOT_ASSOCIATED */
#define eWLAN_BAP_MAC_CONNECT_INDICATION /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_ASSOCIATION_IND */
#define eWLAN_BAP_RSN_SUCCESS /* setKey IOCTL from the Auth/Supp App */
#define eWLAN_BAP_RSN_FAILURE /* deAuth IOCTL from the Auth/Supp App */
#define eWLAN_BAP_MAC_KEY_SET_SUCCESS /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_KEY_SET */
#define eWLAN_BAP_MAC_INDICATES_MEDIA_DISCONNECTION /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_DISASSOC_IND */
#define eWLAN_BAP_MAC_READY_FOR_CONNECTIONS /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_STOPPED */
#define eWLAN_BAP_CHANNEL_SELECTION_FAILED /* ??? */
#endif /* 0 */
/*Min and max channel values in 2.4GHz band for operational channel validation
on connect*/
#define WLAN_BAP_MIN_24G_CH 1
#define WLAN_BAP_MAX_24G_CH 14
/* The HCI Disconnect Logical Link Complete Event signalling routine*/
VOS_STATUS
signalHCIDiscLogLinkCompEvent
(
ptBtampContext btampContext, /* btampContext value */
v_U8_t status, /* the BT-AMP status */
v_U16_t log_link_handle, /* The Logical Link that disconnected*/
v_U8_t reason /* the BT-AMP reason code */
);
/* Stubs - TODO : Remove once the functions are available */
int
bapSuppDisconnect(tBtampContext *ctx)
{
// Disconnect function is called internally
// TODO : Determine whether a disconnect will be issued from BAP for the supplicant
return ANI_OK;
}
int
bapAuthDisconnect(tBtampContext *ctx)
{
// Disconnect function is called internally
// TODO : Determine whether a disconnect will be issued from BAP for the supplicant
return ANI_OK;
}
VOS_STATUS
bapSetKey( v_PVOID_t pvosGCtx, tCsrRoamSetKey *pSetKeyInfo )
{
tWLAN_BAPEvent bapEvent; /* State machine event */
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
ptBtampContext btampContext; /* use btampContext value */
v_U8_t status; /* return the BT-AMP status here */
eHalStatus halStatus;
v_U32_t roamId = 0xFF;
tHalHandle hHal = NULL;
v_U8_t groupMac[ANI_MAC_ADDR_SIZE] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
/* Validate params */
if ((pvosGCtx == NULL) || (pSetKeyInfo == NULL))
{
return VOS_STATUS_E_FAULT;
}
btampContext = VOS_GET_BAP_CB(pvosGCtx);
/* Validate params */
if ( btampContext == NULL)
{
return VOS_STATUS_E_FAULT;
}
hHal = VOS_GET_HAL_CB(btampContext->pvosGCtx);
if (NULL == hHal)
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
"hHal is NULL in %s", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampContext value: %x", __FUNCTION__, btampContext);
/* Fill in the event structure */
bapEvent.event = eWLAN_BAP_RSN_SUCCESS;
bapEvent.params = NULL;
/* Signal the successful RSN auth and key exchange event */
/* (You have to signal BEFORE calling sme_RoamSetKey) */
vosStatus = btampFsm(btampContext, &bapEvent, &status);
/* Set the Pairwise Key */
halStatus = sme_RoamSetKey(
hHal,
btampContext->sessionId,
pSetKeyInfo,
&roamId );
if ( halStatus != eHAL_STATUS_SUCCESS )
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
"[%4d] sme_RoamSetKey returned ERROR status= %d", __LINE__, halStatus );
return VOS_STATUS_E_FAULT;
}
/* Set the Group Key */
vos_mem_copy( pSetKeyInfo->peerMac, groupMac, sizeof( tAniMacAddr ) );
halStatus = sme_RoamSetKey(
hHal,
btampContext->sessionId,
pSetKeyInfo,
&roamId );
if ( halStatus != eHAL_STATUS_SUCCESS )
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
"[%4d] sme_RoamSetKey returned ERROR status= %d", __LINE__, halStatus );
return VOS_STATUS_E_FAULT;
}
return vosStatus;
}
/*
* Debug-related Defines.
* These defines control the DUMPLOG hex-dump tracing facility below.
*/
#define DUMPLOG_ON
#if defined DUMPLOG_ON
#define DUMPLOG(n, name1, name2, aStr, size) \
if (1) \
{\
int i;\
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%d. %s: %s = \n", n, name1, name2); \
for (i = 0; i < size; i++) \
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%2.2x%s", ((unsigned char *)aStr)[i], i % 16 == 15 ? "\n" : " "); \
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "\n"); \
}
#else
#define DUMPLOG(n, name1, name2, aStr, size)
#endif
/*
* State transition procedures
*/
VOS_STATUS
gotoS1
(
ptBtampContext btampContext, /* btampContext value */
ptWLAN_BAPEvent bapEvent, /* State machine event */
tWLAN_BAPRole BAPDeviceRole,
v_U8_t *status /* return the BT-AMP status here */
)
{
tBtampTLVHCI_Create_Physical_Link_Cmd *pBapHCIPhysLinkCreate
= (tBtampTLVHCI_Create_Physical_Link_Cmd *) bapEvent->params;
tBtampTLVHCI_Accept_Physical_Link_Cmd *pBapHCIPhysLinkAccept
= (tBtampTLVHCI_Accept_Physical_Link_Cmd *) bapEvent->params;
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
v_U32_t conAcceptTOInterval;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/* Remember role */
btampContext->BAPDeviceRole = BAPDeviceRole;
switch(BAPDeviceRole)
{
case BT_INITIATOR:
/* Copy down the phy_link_handle value */
btampContext->phy_link_handle = pBapHCIPhysLinkCreate->phy_link_handle;
/* Copy out the key material from the HCI command */
btampContext->key_type = pBapHCIPhysLinkCreate->key_type;
btampContext->key_length = pBapHCIPhysLinkCreate->key_length;
vos_mem_copy(
btampContext->key_material,
pBapHCIPhysLinkCreate->key_material,
32); /* Need a key size define */
break;
case BT_RESPONDER:
/* Copy down the phy_link_handle value */
btampContext->phy_link_handle = pBapHCIPhysLinkAccept->phy_link_handle;
/* Copy out the key material from the HCI command */
btampContext->key_type = pBapHCIPhysLinkAccept->key_type;
btampContext->key_length = pBapHCIPhysLinkAccept->key_length;
vos_mem_copy(
btampContext->key_material,
pBapHCIPhysLinkAccept->key_material,
32); /* Need a key size define */
break;
default:
*status = WLANBAP_ERROR_HOST_REJ_RESOURCES; /* return the BT-AMP status here */
return VOS_STATUS_E_RESOURCES;
}
conAcceptTOInterval = (btampContext->bapConnectionAcceptTimerInterval * 5)/ 8;
/* Start the Connection Accept Timer */
vosStatus = WLANBAP_StartConnectionAcceptTimer (
btampContext,
conAcceptTOInterval);
*status = WLANBAP_STATUS_SUCCESS; /* return the BT-AMP status here */
return VOS_STATUS_SUCCESS;
} //gotoS1
VOS_STATUS
gotoScanning
(
ptBtampContext btampContext, /* btampContext value */
tWLAN_BAPRole BAPDeviceRole,
v_U8_t *status /* return the BT-AMP status here */
)
{
/* Initiate a SCAN request */
//csrScanRequest();
*status = WLANBAP_STATUS_SUCCESS; /* return the BT-AMP status here */
return VOS_STATUS_SUCCESS;
}
#if 0
/*==========================================================================
FUNCTION: convertRoleToBssType
DESCRIPTION: Return one of the following values:
eCSR_BSS_TYPE_INFRASTRUCTURE,
eCSR_BSS_TYPE_IBSS, // an IBSS network we will NOT start
eCSR_BSS_TYPE_START_IBSS, // an IBSS network we will start if no partners detected.
eCSR_BSS_TYPE_WDS_AP, // BT-AMP AP
eCSR_BSS_TYPE_WDS_STA, // BT-AMP station
eCSR_BSS_TYPE_ANY,
============================================================================*/
#endif
eCsrRoamBssType
convertRoleToBssType
(
tWLAN_BAPRole bapRole /* BT-AMP role */
)
{
switch (bapRole)
{
case BT_RESPONDER:
// an WDS network we will join
return eCSR_BSS_TYPE_WDS_STA;
//return eCSR_BSS_TYPE_INFRASTRUCTURE;
//return eCSR_BSS_TYPE_IBSS; // Initial testing with IBSS on both ends makes more sense
case BT_INITIATOR:
// an WDS network we will start if no partners detected.
return eCSR_BSS_TYPE_WDS_AP;
//return eCSR_BSS_TYPE_START_IBSS; // I really should try IBSS on both ends
default:
return eCSR_BSS_TYPE_INFRASTRUCTURE;
}
} // convertRoleToBssType
char hexValue[] = {'0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
};
#define BAP_MIN(x, y) ((x) < (y) ? (x) : (y))
#define MAX_BYTES 8
// Each byte will be converted to hex digits followed by a
// punctuation (which is specified in the "delimiter" param.) Thus
// allocate three times the storage.
v_U8_t *
bapBin2Hex(const v_U8_t *bytes, v_U32_t len, char delimiter)
{
static v_U8_t buf[MAX_BYTES*(2+1)];
v_U32_t i;
v_U8_t *ptr;
len = BAP_MIN(len, MAX_BYTES);
for (i = 0, ptr = buf; i < len; i++)
{
*ptr++ = hexValue[ (bytes[i] >> 4) & 0x0f];
*ptr++ = hexValue[ bytes[i] & 0x0f];
*ptr++ = delimiter;
//sprintf(ptr, "%.2x%c", bytes[i], delimiter);
//ptr += 3;
}
// Delete the extra punctuation and null terminate the string
if (len > 0)
ptr--;
*ptr = '\0';
return buf;
}// bapBin2Hex
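// Example: bapBin2Hex(mac, 6, '-') on 00:0a:f5:01:02:03 yields the
// 17-character string "00-0a-f5-01-02-03". Note that the static buffer
// makes this helper non-reentrant.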
char bapSsidPrefixValue[] = {'A', 'M', 'P', '-'};
v_U8_t *
convertBSSIDToSSID
(
v_U8_t *bssid /* BSSID value */
)
{
static v_U8_t ssId[32];
vos_mem_copy(
ssId,
bapSsidPrefixValue,
4);
vos_mem_copy(
&ssId[4],
bapBin2Hex(bssid, 6, '-'),
17);
return ssId;
} // convertBSSIDToSSID
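// The resulting SSID is the 4-character "AMP-" prefix plus the
// 17-character dashed MAC string, i.e. exactly the 21-byte length
// hard-coded into the CSR profile below.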
VOS_STATUS
convertToCsrProfile
(
ptBtampContext btampContext, /* btampContext value */
eCsrRoamBssType bssType,
tCsrRoamProfile *pProfile /* return the profile info here */
)
{
static v_U8_t btampRSNIE[] = {0x30, 0x14, 0x01, 0x00, 0x00, 0x0f, 0xac, 0x04, 0x01, 0x00,
0x00, 0x0f, 0xac, 0x04, 0x01, 0x00, 0x00, 0x0f, 0xac, 0x02, 0x00, 0x00
};
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
v_S7_t sessionid = -1;
tHalHandle hHal = NULL;
v_U32_t triplet;
v_U8_t regulatoryClass;
v_U8_t firstChannel;
v_U8_t numChannels;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
if (NULL == btampContext)
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
"btampContext is NULL in %s", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
hHal = VOS_GET_HAL_CB(btampContext->pvosGCtx);
if (NULL == hHal)
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
"hHal is NULL in %s", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
//Zero out entire roamProfile structure to avoid problems in uninitialized pointers as the structure expands
//vos_mem_zero(pProfile,sizeof(tCsrRoamProfile));
//Set the BSS Type
//pProfile->BSSType = convertRoleToBssType(btampContext->BAPDeviceRole );
pProfile->BSSType = bssType;
//pProfile->BSSType = eCSR_BSS_TYPE_INFRASTRUCTURE;
//Set the SSID
if ( bssType == eCSR_BSS_TYPE_WDS_STA)
{
pProfile->SSIDs.numOfSSIDs = 2;
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: bssType = %s, SSID specified = %s\n", __FUNCTION__, "eCSR_BSS_TYPE_WDS_STA", convertBSSIDToSSID(btampContext->btamp_Remote_AMP_Assoc.HC_mac_addr));
vos_mem_zero(pProfile->SSIDs.SSIDList[0].SSID.ssId,
sizeof(pProfile->SSIDs.SSIDList[0].SSID.ssId));
vos_mem_copy(pProfile->SSIDs.SSIDList[0].SSID.ssId,
convertBSSIDToSSID(btampContext->btamp_Remote_AMP_Assoc.HC_mac_addr),
21); // Length of BTAMP SSID is 21 bytes
pProfile->SSIDs.SSIDList[0].SSID.length = 21;
vos_mem_zero(pProfile->SSIDs.SSIDList[1].SSID.ssId,
sizeof(pProfile->SSIDs.SSIDList[1].SSID.ssId));
vos_mem_copy(pProfile->SSIDs.SSIDList[1].SSID.ssId,
convertBSSIDToSSID(btampContext->self_mac_addr),
21); // Length of BTAMP SSID is 21 bytes
pProfile->SSIDs.SSIDList[1].SSID.length = 21;
//Set the BSSID to the Remote AP
pProfile->BSSIDs.numOfBSSIDs = 1;
vos_mem_copy(pProfile->BSSIDs.bssid,
btampContext->btamp_Remote_AMP_Assoc.HC_mac_addr,
sizeof( tCsrBssid ) );
}
else if ( bssType == eCSR_BSS_TYPE_WDS_AP)
{
pProfile->SSIDs.numOfSSIDs = 1;
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: bssType = %s, SSID specified = %s\n", __FUNCTION__, "eCSR_BSS_TYPE_WDS_AP", convertBSSIDToSSID(btampContext->self_mac_addr));
vos_mem_zero(pProfile->SSIDs.SSIDList[0].SSID.ssId,
sizeof(pProfile->SSIDs.SSIDList[0].SSID.ssId));
vos_mem_copy(pProfile->SSIDs.SSIDList[0].SSID.ssId,
convertBSSIDToSSID(btampContext->self_mac_addr),
21); // Length of BTAMP SSID is 21 bytes
pProfile->SSIDs.SSIDList[0].SSID.length = 21;
#if 0
//In case you are an AP, don't set the BSSID
pProfile->BSSIDs.numOfBSSIDs = 0;
#endif //0
//Set the BSSID to your "self MAC Addr"
pProfile->BSSIDs.numOfBSSIDs = 1;
vos_mem_copy(pProfile->BSSIDs.bssid,
btampContext->self_mac_addr,
sizeof( tCsrBssid ) );
}
else
// Handle everything else as bssType eCSR_BSS_TYPE_INFRASTRUCTURE
{
pProfile->SSIDs.numOfSSIDs = 1;
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: bssType = %s, SSID specified = %s\n", __FUNCTION__, "eCSR_BSS_TYPE_WDS_STA", convertBSSIDToSSID(btampContext->btamp_Remote_AMP_Assoc.HC_mac_addr));
vos_mem_zero(pProfile->SSIDs.SSIDList[0].SSID.ssId,
sizeof(pProfile->SSIDs.SSIDList[0].SSID.ssId));
vos_mem_copy(pProfile->SSIDs.SSIDList[0].SSID.ssId,
convertBSSIDToSSID(btampContext->btamp_Remote_AMP_Assoc.HC_mac_addr),
21); // Length of BTAMP SSID is 21 bytes
pProfile->SSIDs.SSIDList[0].SSID.length = 21;
//Set the BSSID to the Remote AP
pProfile->BSSIDs.numOfBSSIDs = 1;
vos_mem_copy(pProfile->BSSIDs.bssid,
btampContext->btamp_Remote_AMP_Assoc.HC_mac_addr,
sizeof( tCsrBssid ) );
}
//Always set the Auth Type
//pProfile->negotiatedAuthType = eCSR_AUTH_TYPE_RSN_PSK;
//pProfile->negotiatedAuthType = eCSR_AUTH_TYPE_NONE;
//pProfile->negotiatedAuthType = eCSR_AUTH_TYPE_OPEN_SYSTEM;
pProfile->AuthType.numEntries = 1;
//pProfile->AuthType.authType[0] = eCSR_AUTH_TYPE_OPEN_SYSTEM;
pProfile->AuthType.authType[0] = eCSR_AUTH_TYPE_RSN_PSK;
//Always set the Encryption Type
//pProfile->negotiatedUCEncryptionType = eCSR_ENCRYPT_TYPE_AES;
//pProfile->negotiatedUCEncryptionType = eCSR_ENCRYPT_TYPE_NONE;
pProfile->EncryptionType.numEntries = 1;
//pProfile->EncryptionType.encryptionType[0] = eCSR_ENCRYPT_TYPE_NONE;
pProfile->EncryptionType.encryptionType[0] = eCSR_ENCRYPT_TYPE_AES;
pProfile->mcEncryptionType.numEntries = 1;
//pProfile->mcEncryptionType.encryptionType[0] = eCSR_ENCRYPT_TYPE_NONE;
pProfile->mcEncryptionType.encryptionType[0] = eCSR_ENCRYPT_TYPE_AES;
//set the RSN IE
//This is weird, but it works
pProfile->pRSNReqIE = &btampRSNIE[0];
pProfile->nRSNReqIELength = 0x16; //TODO
//pProfile->pRSNReqIE = NULL;
/** We don't use the WPAIE.But NULL it to avoid being used **/
pProfile->pWPAReqIE = NULL;
pProfile->nWPAReqIELength = 0;
// Identify the operation channel
/* Choose the operation channel from the preferred channel list */
pProfile->operationChannel = 0;
regulatoryClass = 0;
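/* The peer's preferred channel list is encoded as 802.11 country
triplets: a triplet whose first octet is 201 names a regulatory
class, and any triplet that follows it (first octet != 201) is a
sub-band triplet of (first channel, number of channels). Only the
2.4GHz classes this PAL knows about (254/"XX", 12/"US", 4/"GB",
30/"JP") are considered. */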
for (triplet = 0; triplet < btampContext->btamp_Remote_AMP_Assoc.HC_pref_num_triplets; triplet++)
{
firstChannel = 0;
numChannels = 0;
/* is this a regulatory class triplet? */
if (btampContext->btamp_Remote_AMP_Assoc.HC_pref_triplets[triplet][0] == 201)
{
/* identify supported 2.4GHz regulatory classes */
switch (btampContext->btamp_Remote_AMP_Assoc.HC_pref_triplets[triplet][1])
{
case 254:
{
/* class 254 is a special regulatory class defined by the BT HS+3.0 spec
that is valid only for an unknown/'mobile' country */
if ((btampContext->btamp_Remote_AMP_Assoc.HC_pref_country[0] == 'X') &&
(btampContext->btamp_Remote_AMP_Assoc.HC_pref_country[1] == 'X'))
{
regulatoryClass = 254;
firstChannel = 1;
numChannels = 11;
}
break;
}
case 12:
{
/* class 12 in the US regulatory domain is 2.4GHz channels 1-11 */
if ((btampContext->btamp_Remote_AMP_Assoc.HC_pref_country[0] == 'U') &&
(btampContext->btamp_Remote_AMP_Assoc.HC_pref_country[1] == 'S'))
{
regulatoryClass = 12;
firstChannel = 1;
numChannels = 11;
}
break;
}
case 4:
{
/* class 4 in the Europe regulatory domain is 2.4GHz channels 1-13 */
if ((btampContext->btamp_Remote_AMP_Assoc.HC_pref_country[0] == 'G') &&
(btampContext->btamp_Remote_AMP_Assoc.HC_pref_country[1] == 'B'))
{
regulatoryClass = 4;
firstChannel = 1;
numChannels = 13;
}
break;
}
case 30:
{
/* class 30 in the Japan regulatory domain is 2.4GHz channels 1-13 */
if ((btampContext->btamp_Remote_AMP_Assoc.HC_pref_country[0] == 'J') &&
(btampContext->btamp_Remote_AMP_Assoc.HC_pref_country[1] == 'P'))
{
regulatoryClass = 30;
firstChannel = 1;
numChannels = 13;
}
break;
}
default:
{
break;
}
}
/* if the next triplet is not another regulatory class triplet then it must be a sub-band
triplet. Skip processing the default channels for this regulatory class triplet and let
the sub-band triplet restrict the available channels */
if (((triplet+1) < btampContext->btamp_Remote_AMP_Assoc.HC_pref_num_triplets) &&
(btampContext->btamp_Remote_AMP_Assoc.HC_pref_triplets[triplet+1][0] != 201))
{
continue;
}
}
else
{
/* if the regulatory class is valid then this is a sub-band triplet */
if (regulatoryClass)
{
firstChannel = btampContext->btamp_Remote_AMP_Assoc.HC_pref_triplets[triplet][0];
numChannels = btampContext->btamp_Remote_AMP_Assoc.HC_pref_triplets[triplet][1];
}
}
if (firstChannel && numChannels)
{
if (!btampContext->btamp_AMP_Assoc.HC_pref_num_triplets)
{
pProfile->operationChannel = firstChannel;
break;
}
else if (((btampContext->btamp_AMP_Assoc.HC_pref_triplets[1][0] + btampContext->btamp_AMP_Assoc.HC_pref_triplets[1][1]) <= firstChannel) ||
((firstChannel + numChannels ) <= btampContext->btamp_AMP_Assoc.HC_pref_triplets[1][0]))
{
continue;
}
else if ((btampContext->btamp_AMP_Assoc.HC_pref_triplets[1][0] + btampContext->btamp_AMP_Assoc.HC_pref_triplets[1][1]) > firstChannel)
{
pProfile->operationChannel = firstChannel;
break;
}
else if ((firstChannel + numChannels) > btampContext->btamp_AMP_Assoc.HC_pref_triplets[1][0])
{
pProfile->operationChannel = btampContext->btamp_AMP_Assoc.HC_pref_triplets[1][0];
break;
}
}
}
if (!pProfile->operationChannel)
{
return VOS_STATUS_E_INVAL;
}
/*Set the selected channel */
sessionid = sme_GetInfraSessionId(hHal);
/*if there is infra session up already, use that channel only for BT AMP
connection, else we can use the user preferred one*/
if(-1 != sessionid)
{
pProfile->operationChannel =
sme_GetInfraOperationChannel(hHal,
sessionid);
}
if(sme_IsChannelValid(hHal, pProfile->operationChannel))
{
btampContext->channel = pProfile->operationChannel;
}
else
{
//no valid channel, not proceeding with connection
return VOS_STATUS_E_INVAL;
}
if ( BT_INITIATOR == btampContext->BAPDeviceRole )
{
pProfile->ChannelInfo.numOfChannels = 1;
pProfile->ChannelInfo.ChannelList = &pProfile->operationChannel;
}
else
{
pProfile->ChannelInfo.numOfChannels = 1;
pProfile->ChannelInfo.ChannelList = &pProfile->operationChannel;
}
// Turn off CB mode
pProfile->CBMode = eCSR_CB_OFF;
//set the phyMode to accept anything
//Taurus means everything because it covers all the things we support
pProfile->phyMode = eCSR_DOT11_MODE_11n; //eCSR_DOT11_MODE_TAURUS; //eCSR_DOT11_MODE_AUTO; /*eCSR_DOT11_MODE_BEST;*/
//set the mode in CFG as well
sme_CfgSetInt(hHal, WNI_CFG_DOT11_MODE, WNI_CFG_DOT11_MODE_11N, NULL, eANI_BOOLEAN_FALSE);
pProfile->bWPSAssociation = eANI_BOOLEAN_FALSE;
//Make sure we DON'T request UAPSD
pProfile->uapsd_mask = 0;
//return the vosStatus
return vosStatus;
} //convertToCsrProfile
VOS_STATUS
gotoStarting
(
ptBtampContext btampContext, /* btampContext value */
ptWLAN_BAPEvent bapEvent, /* State machine event */
eCsrRoamBssType bssType,
v_U8_t *status /* return the BT-AMP status here */
)
{
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
eHalStatus halStatus;
v_U32_t parseStatus;
/* tHalHandle */
tHalHandle hHal;
tBtampTLVHCI_Write_Remote_AMP_ASSOC_Cmd *pBapHCIWriteRemoteAMPAssoc
= (tBtampTLVHCI_Write_Remote_AMP_ASSOC_Cmd *) bapEvent->params;
tBtampAMP_ASSOC btamp_ASSOC;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
if (NULL == btampContext)
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
"btampContext is NULL in %s", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
hHal = VOS_GET_HAL_CB(btampContext->pvosGCtx);
if (NULL == hHal)
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
"hHal is NULL in %s", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
//If we are a BT-Responder, we are assuming we are a BT "slave" and we HAVE
//to "squelch" the slave's frequent (every 1.25ms) polls.
if (eCSR_BSS_TYPE_WDS_STA == bssType)
{
/* Sleep for 200 milliseconds - to allow BT traffic through */
vos_sleep( 200 );
/* Signal BT Coexistence code in firmware to prefer WLAN */
WLANBAP_NeedBTCoexPriority ( btampContext, 1);
}
//Tell PMC to exit BMPS;
halStatus = pmcRequestFullPower(
hHal,
WLANBAP_pmcFullPwrReqCB,
btampContext,
eSME_REASON_OTHER);
// JEZ081210: This has to wait until we sync down from
// /main/latest as of 12/4. We are currently at 12/3.
//eSME_FULL_PWR_NEEDED_BY_BAP);
//Need to check the result...because Host may have been told by
//OS to go to standby (D2) device state. In that case, I have to
//fail the HCI Create Physical Link
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH,
"In %s, amp_assoc_remaining_length = %d", __FUNCTION__,
pBapHCIWriteRemoteAMPAssoc->amp_assoc_remaining_length);
#if 0
DUMPLOG(1, __FUNCTION__, "amp_assoc_fragment",
pBapHCIWriteRemoteAMPAssoc->amp_assoc_fragment,
64);
#endif //0
//What about parsing the AMP Assoc structure?
parseStatus = btampUnpackAMP_ASSOC(
hHal,
pBapHCIWriteRemoteAMPAssoc->amp_assoc_fragment,
pBapHCIWriteRemoteAMPAssoc->amp_assoc_remaining_length,
&btamp_ASSOC);
/* Unknown or Reserved TLVs are allowed in the write AMP assoc fragment */
if ((BTAMP_PARSE_SUCCESS != parseStatus ) && (BTAMP_UNKNOWN_TLVS != parseStatus))
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, parseStatus = %d", __FUNCTION__, parseStatus);
*status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM;
return VOS_STATUS_E_BADMSG;
}
//What about writing the peer MAC address, and other info to the BTAMP
//context for this physical link?
if (btamp_ASSOC.AMP_Assoc_MAC_Addr.present == 1)
{
/* Save the peer MAC address */
vos_mem_copy(
btampContext->btamp_Remote_AMP_Assoc.HC_mac_addr,
btamp_ASSOC.AMP_Assoc_MAC_Addr.mac_addr,
sizeof(btampContext->btamp_Remote_AMP_Assoc.HC_mac_addr));
/* Save it in the peer MAC address field */
vos_mem_copy(
btampContext->peer_mac_addr,
btamp_ASSOC.AMP_Assoc_MAC_Addr.mac_addr,
sizeof(btampContext->peer_mac_addr));
}
if (btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.present == 1)
{
/* Save the peer Preferred Channel List */
vos_mem_copy(
btampContext->btamp_Remote_AMP_Assoc.HC_pref_country,
btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.country,
sizeof(btampContext->btamp_Remote_AMP_Assoc.HC_pref_country));
/* Save the peer Preferred Channel List */
btampContext->btamp_Remote_AMP_Assoc.HC_pref_num_triplets =
btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.num_triplets;
if(WLANBAP_MAX_NUM_TRIPLETS <
btampContext->btamp_Remote_AMP_Assoc.HC_pref_num_triplets)
{
btampContext->btamp_Remote_AMP_Assoc.HC_pref_num_triplets =
WLANBAP_MAX_NUM_TRIPLETS;
}
vos_mem_copy(
btampContext->btamp_Remote_AMP_Assoc.HC_pref_triplets,
btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets,
sizeof(btampContext->btamp_Remote_AMP_Assoc.HC_pref_triplets[0]) *
btampContext->btamp_Remote_AMP_Assoc.HC_pref_num_triplets
);
}
if (btamp_ASSOC.AMP_Assoc_Connected_Channel.present == 1)
{
/* Save the peer Connected Channel */
vos_mem_copy(
btampContext->btamp_Remote_AMP_Assoc.HC_cnct_country,
btamp_ASSOC.AMP_Assoc_Connected_Channel.country,
sizeof(btampContext->btamp_Remote_AMP_Assoc.HC_cnct_country));
/* Save the peer Connected Channel */
btampContext->btamp_Remote_AMP_Assoc.HC_cnct_num_triplets =
btamp_ASSOC.AMP_Assoc_Connected_Channel.num_triplets;
if(WLANBAP_MAX_NUM_TRIPLETS <
btampContext->btamp_Remote_AMP_Assoc.HC_cnct_num_triplets)
{
btampContext->btamp_Remote_AMP_Assoc.HC_cnct_num_triplets =
WLANBAP_MAX_NUM_TRIPLETS;
}
vos_mem_copy(
btampContext->btamp_Remote_AMP_Assoc.HC_cnct_triplets,
btamp_ASSOC.AMP_Assoc_Connected_Channel.triplets,
sizeof(btampContext->btamp_Remote_AMP_Assoc.HC_cnct_triplets[0]) *
btampContext->btamp_Remote_AMP_Assoc.HC_cnct_num_triplets
);
}
if (btamp_ASSOC.AMP_Assoc_PAL_Capabilities.present == 1)
{
/* Save the peer PAL Capabilities */
btampContext->btamp_Remote_AMP_Assoc.HC_pal_capabilities
= btamp_ASSOC.AMP_Assoc_PAL_Capabilities.pal_capabilities;
}
if (btamp_ASSOC.AMP_Assoc_PAL_Version.present == 1)
{
/* Save the peer PAL Version */
btampContext->btamp_Remote_AMP_Assoc.HC_pal_version
= btamp_ASSOC.AMP_Assoc_PAL_Version.pal_version;
btampContext->btamp_Remote_AMP_Assoc.HC_pal_CompanyID
= btamp_ASSOC.AMP_Assoc_PAL_Version.pal_CompanyID;
btampContext->btamp_Remote_AMP_Assoc.HC_pal_subversion
= btamp_ASSOC.AMP_Assoc_PAL_Version.pal_subversion;
}
//Set Connection Accept Timeout;
/* Already done in gotoS1() */
//Set gNeedPhysLinkCompEvent;
//JEZ081114: This needs to happen earlier. In gotoS1. Right at HCI Create Physical Link
btampContext->gNeedPhysLinkCompEvent = VOS_TRUE;
//Clear gDiscRequested;
btampContext->gDiscRequested = VOS_FALSE;
//Set gPhysLinkStatus to 0 (no error);
btampContext->gPhysLinkStatus = WLANBAP_STATUS_SUCCESS;
//Set gDiscReason to 0 (no reason);
btampContext->gDiscReason = WLANBAP_STATUS_SUCCESS;
/* Initiate the link as either START or JOIN */
//halStatus = csrRoamOpenSession(&newSession);
/*Added by Luiza:*/
if (btampContext->isBapSessionOpen == FALSE)
{
halStatus = sme_OpenSession(hHal,
WLANBAP_RoamCallback,
btampContext,
// <=== JEZ081210: FIXME
//(tANI_U8 *) btampContext->self_mac_addr,
btampContext->self_mac_addr,
&btampContext->sessionId);
if(eHAL_STATUS_SUCCESS == halStatus)
{
btampContext->isBapSessionOpen = TRUE;
}
else
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
"sme_OpenSession failed in %s", __FUNCTION__);
*status = WLANBAP_ERROR_NO_CNCT;
return VOS_STATUS_E_FAILURE;
}
}
/* Update the SME Session info for this Phys Link (i.e., for this Phys State Machine instance) */
//bapUpdateSMESessionForThisPhysLink(newSession, PhysLinkHandle);
// Taken care of, above
//halStatus = csrRoamConnect(newSession, bssType);
// Final
vosStatus = convertToCsrProfile (
btampContext, /* btampContext value */
bssType,
&btampContext->csrRoamProfile); /* return the profile info here */
if(VOS_STATUS_E_INVAL == vosStatus)
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
"Incorrect channel to create AMP link %s", __FUNCTION__);
*status = WLANBAP_ERROR_NO_SUITABLE_CHANNEL;
return VOS_STATUS_E_INVAL;
}
#if 0
halStatus = sme_RoamConnect(VOS_GET_HAL_CB(btampContext->pvosGCtx),
&btampContext->csrRoamProfile,
NULL, /* tScanResultHandle hBssListIn, */
&btampContext->csrRoamId);
#endif //0
//#if 0
halStatus = sme_RoamConnect(hHal,
btampContext->sessionId,
&btampContext->csrRoamProfile,
&btampContext->csrRoamId);
//#endif //0
//Map the halStatus into a vosStatus
return vosStatus;
} //gotoStarting
VOS_STATUS
gotoConnecting(
ptBtampContext btampContext /* btampContext value */
)
{
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
/* No longer needed. This call has been made in gotoStarting(). */
/* Signal BT Coexistence code in firmware to prefer WLAN */
WLANBAP_NeedBTCoexPriority ( btampContext, 1);
return vosStatus;
} //gotoConnecting
VOS_STATUS
gotoAuthenticating(
ptBtampContext btampContext /* btampContext value */
)
{
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
/* Signal BT Coexistence code in firmware to prefer WLAN */
WLANBAP_NeedBTCoexPriority ( btampContext, 1);
return vosStatus;
} //gotoAuthenticating
#if 0
VOID initRsnSupplicant()
{
/* This is a NO-OP. The Supplicant waits for MSG 1 */
}
#endif /* 0 */
VOS_STATUS
initRsnSupplicant
(
ptBtampContext btampContext, /* btampContext value */
tWLAN_BAPRole BAPDeviceRole
)
{
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
/* This is a NO-OP. The Supplicant waits for MSG 1 */
/* Init RSN FSM */
if (!(suppRsnFsmCreate(btampContext)))
{
/* Send Start Event */
/* RSN_FSM_AUTH_START */
}
else
{
/* RSN Init Failed */
vosStatus = VOS_STATUS_E_FAILURE;
}
/* This is a NO-OP. The Supplicant waits for MSG 1 */
return vosStatus;
}
#if 0
VOID initRsnAuthenticator()
{
/* Signal the Authenticator/Supplicant App that we are associated. */
/* Use an IOCTL? That the app is hanging a read on? Or use a "special" data packet. Again, that the app is waiting on a receive for. */
}
#endif /* 0 */
VOS_STATUS
initRsnAuthenticator
(
ptBtampContext btampContext, /* btampContext value */
tWLAN_BAPRole BAPDeviceRole
)
{
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
/* Init RSN FSM */
if (!(authRsnFsmCreate(btampContext)))
{
/* Send Start Event */
}
else
{
/* RSN Init Failed */
vosStatus = VOS_STATUS_E_FAILURE;
}
return vosStatus;
/* Signal the Authenticator/Supplicant App that we are associated. */
/* Use an IOCTL that the app has a blocking read on? Or use a "special" data packet that the app is waiting to receive? */
}
/* We have to register our STA with TL */
VOS_STATUS
regStaWithTl
(
ptBtampContext btampContext, /* btampContext value */
tWLAN_BAPRole BAPDeviceRole,
tCsrRoamInfo *pCsrRoamInfo
)
{
VOS_STATUS vosStatus;
WLAN_STADescType staDesc;
tANI_S8 rssi = 0;
vos_mem_zero(&staDesc, sizeof(WLAN_STADescType));
/* Fill in everything I know about the STA */
btampContext->ucSTAId = staDesc.ucSTAId = pCsrRoamInfo->staId;
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "BAP register TL ucSTAId=%d\n",
staDesc.ucSTAId );
/* Fill in the peer MAC address */
vos_mem_copy(
staDesc.vSTAMACAddress.bytes,
btampContext->peer_mac_addr,
sizeof(btampContext->peer_mac_addr));
/* Fill in the self MAC address */
vos_mem_copy(
staDesc.vSelfMACAddress.bytes,
btampContext->self_mac_addr,
sizeof(btampContext->self_mac_addr));
/* Set the STA Type */
staDesc.wSTAType = WLAN_STA_BT_AMP;
// Set the QoS field appropriately, if the info is available
if( pCsrRoamInfo->u.pConnectedProfile)
{
btampContext->bapQosCfg.bWmmIsEnabled = //1;
pCsrRoamInfo->u.pConnectedProfile->qosConnection;
}
else
{
btampContext->bapQosCfg.bWmmIsEnabled = 0;
}
// set the QoS field appropriately
if( btampContext->bapQosCfg.bWmmIsEnabled )
{
staDesc.ucQosEnabled = 1;
}
else
{
staDesc.ucQosEnabled = 0;
}
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "BAP register TL QoS_enabled=%d\n",
staDesc.ucQosEnabled );
// When UMA is ready, we inform TL not to do frame
// translation (for WinMob 6.1).
// *** Do not enable UMA.
/* Enable UMA for TX translation only when there is no concurrent session active */
#if defined (FEATURE_WLAN_INTEGRATED_SOC)
staDesc.ucSwFrameTXXlation = 1;
#else
if (vos_concurrent_sessions_running())
{
staDesc.ucSwFrameTXXlation = 1;
}
else
{
staDesc.ucSwFrameTXXlation = 0;
}
#endif
staDesc.ucSwFrameRXXlation = 1;
staDesc.ucAddRmvLLC = 0;
if ( btampContext->ucSecEnabled )
{
staDesc.ucProtectedFrame = 1;
}
else
{
staDesc.ucProtectedFrame = 0;
}
staDesc.ucUcastSig = pCsrRoamInfo->ucastSig;
staDesc.ucBcastSig = pCsrRoamInfo->bcastSig;
staDesc.ucInitState = ( btampContext->ucSecEnabled)?
WLANTL_STA_CONNECTED:WLANTL_STA_AUTHENTICATED;
staDesc.ucIsReplayCheckValid = VOS_FALSE;
if(NULL != pCsrRoamInfo->pBssDesc)
{
rssi = pCsrRoamInfo->pBssDesc->rssi;
}
/* register our STA with TL */
vosStatus = WLANTL_RegisterSTAClient
(
btampContext->pvosGCtx,
WLANBAP_STARxCB,
WLANBAP_TxCompCB,
(WLANTL_STAFetchPktCBType)WLANBAP_STAFetchPktCB,
&staDesc ,
rssi);
if ( !VOS_IS_STATUS_SUCCESS( vosStatus ) )
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
"%s: WLANTL_RegisterSTAClient() failed to register. Status= %d [0x%08lX]",
__FUNCTION__, vosStatus, vosStatus );
}
if ( ! btampContext->ucSecEnabled )
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_MED,
"open/shared auth StaId= %d. Changing TL state to AUTHENTICATED at Join time", btampContext->ucSTAId);
// Connections that do not need Upper layer auth, transition TL directly
// to 'Authenticated' state.
vosStatus = WLANTL_ChangeSTAState( btampContext->pvosGCtx, staDesc.ucSTAId,
WLANTL_STA_AUTHENTICATED );
}
else
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_MED,
"ULA auth StaId= %d. Changing TL state to CONNECTED at Join time", btampContext->ucSTAId );
vosStatus = WLANTL_ChangeSTAState( btampContext->pvosGCtx, staDesc.ucSTAId,
WLANTL_STA_CONNECTED );
}
return VOS_STATUS_SUCCESS;
} /* regStaWithTl */
#if 0
/*==========================================================================
FUNCTION: determineChan
DESCRIPTION: Return the current channel we are to operate on
============================================================================*/
#endif
VOS_STATUS
determineChan
(
ptBtampContext btampContext, /* btampContext value */
tWLAN_BAPRole BAPDeviceRole,
v_U32_t *channel, /* Current channel */
v_U8_t *status /* return the BT-AMP status here */
)
{
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
v_U32_t activeFlag; /* Channel active flag */
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
switch(BAPDeviceRole)
{
case BT_INITIATOR:
/* if an Infra assoc already exists, return that channel. */
/* or use the results from the Scan to determine the least busy channel. How? */
/* For now, just do this. */
vosStatus = WLANBAP_GetCurrentChannel (btampContext, channel, &activeFlag);
break;
case BT_RESPONDER:
/* return the value obtained from the Preferred Channels field of the AMP Assoc structure from the BT-AMP peer (device A) */
/* No! I don't have that yet. */
/* For now, just do this. */
vosStatus = WLANBAP_GetCurrentChannel (btampContext, channel, &activeFlag);
break;
default:
*status = WLANBAP_ERROR_HOST_REJ_RESOURCES; /* return the BT-AMP status here */
return VOS_STATUS_E_RESOURCES;
}
*status = WLANBAP_STATUS_SUCCESS; /* return the BT-AMP status here */
return vosStatus;
} // determineChan
VOS_STATUS
gotoDisconnected
(
ptBtampContext btampContext /* btampContext value */
)
{
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
//Is it legitimate to always make this call?
//What if pmcRequestFullPower wasn't called?
//Tell PMC to resume BMPS; /* Whatever the previous BMPS "state" was */
//Comment this out until such time as we have PMC support
//halStatus = pmcResumePower ( hHal);
/* Signal BT Coexistence code in firmware to no longer prefer WLAN */
WLANBAP_NeedBTCoexPriority ( btampContext, 0);
//Map the halStatus into a vosStatus
return vosStatus;
} // gotoDisconnected
VOS_STATUS
gotoDisconnecting
(
ptBtampContext btampContext, /* btampContext value */
v_U8_t needPhysLinkCompEvent,
v_U8_t physLinkStatus, /* BT-AMP disconnecting status */
// v_U8_t statusPresent, /* BT-AMP disconnecting status present */
v_U8_t discRequested,
v_U8_t discReason /* BT-AMP disconnecting reason */
)
{
// gNeedPhysLinkCompEvent
btampContext->gNeedPhysLinkCompEvent = needPhysLinkCompEvent;
// gPhysLinkStatus
btampContext->gPhysLinkStatus = physLinkStatus; /* BT-AMP disconnecting status */
// gDiscRequested
btampContext->gDiscRequested = discRequested;
// gDiscReason
btampContext->gDiscReason = discReason; /* BT-AMP disconnecting reason */
//WLANBAP_DeInitLinkSupervision( btampHandle);
//WLANBAP_StopLinkSupervisionTimer(btampContext);
/* Inform user space that no AMP channel is in use, for AFH purposes */
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_LOW,
"Calling send_btc_nlink_msg() with AMP channel = 0");
send_btc_nlink_msg(WLAN_AMP_ASSOC_DONE_IND, 0);
return VOS_STATUS_SUCCESS;
} //gotoDisconnecting
VOS_STATUS
gotoConnected
(
ptBtampContext btampContext /* btampContext value */
)
{
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
ptBtampHandle btampHandle = ( ptBtampHandle)btampContext;
//#if 0
/* Stop the Connection Accept Timer */
vosStatus = WLANBAP_StopConnectionAcceptTimer (btampContext);
//#endif
///*De-initialize the timer */
//vosStatus = WLANBAP_DeinitConnectionAcceptTimer(btampContext);
/* Signal BT Coex in firmware to now honor only priority BT requests */
WLANBAP_NeedBTCoexPriority ( btampContext, 2);
// If required after successful Upper layer auth, transition TL
// to 'Authenticated' state.
if ( btampContext->ucSecEnabled )
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_MED,
"open/shared auth StaId= %d. Changing TL state to AUTHENTICATED at Join time", btampContext->ucSTAId);
vosStatus = WLANTL_ChangeSTAState(
btampContext->pvosGCtx,
btampContext->ucSTAId,
WLANTL_STA_AUTHENTICATED );
}
btampContext->dataPktPending = VOS_FALSE;
vosStatus = WLANBAP_InitLinkSupervision( btampHandle);
/* Inform user space of the AMP channel selected, for AFH purposes */
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_LOW,
"Calling send_btc_nlink_msg() with AMP channel %d", btampContext->channel);
send_btc_nlink_msg(WLAN_AMP_ASSOC_DONE_IND, btampContext->channel);
return vosStatus;
} //gotoConnected
/* the HCI Event signalling routine*/
VOS_STATUS
signalHCIPhysLinkCompEvent
(
ptBtampContext btampContext, /* btampContext value */
v_U8_t status /* the BT-AMP status */
)
{
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
tBtampHCI_Event bapHCIEvent; /* This now encodes ALL event types */
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/* Format the Physical Link Complete event to return... */
bapHCIEvent.bapHCIEventCode = BTAMP_TLV_HCI_PHYSICAL_LINK_COMPLETE_EVENT;
bapHCIEvent.u.btampPhysicalLinkCompleteEvent.present = 1;
bapHCIEvent.u.btampPhysicalLinkCompleteEvent.status = status;
bapHCIEvent.u.btampPhysicalLinkCompleteEvent.phy_link_handle
= btampContext->phy_link_handle;
bapHCIEvent.u.btampPhysicalLinkCompleteEvent.ch_number
= btampContext->channel;
if(WLANBAP_STATUS_SUCCESS == status)
{
/* Start the Tx packet monitoring timer */
WLANBAP_StartTxPacketMonitorTimer(btampContext);
}
else
{ //reset the PL handle
btampContext->phy_link_handle = 0;
}
vosStatus = (*btampContext->pBapHCIEventCB)
(
btampContext->pHddHdl, /* this refers the BSL per application context */
&bapHCIEvent, /* This now encodes ALL event types */
VOS_TRUE /* Flag to indicate assoc-specific event */
);
return vosStatus;
} /* signalHCIPhysLinkCompEvent */
/* the HCI Disconnect Complete Event signalling routine*/
VOS_STATUS
signalHCIPhysLinkDiscEvent
(
ptBtampContext btampContext, /* btampContext value */
v_U8_t status, /* the BT-AMP status */
v_U8_t reason /* the BT-AMP reason code */
)
{
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
tBtampHCI_Event bapHCIEvent; /* This now encodes ALL event types */
v_U8_t i;
tpBtampLogLinkCtx pLogLinkContext = NULL;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
#ifdef BAP_DEBUG
/* Trace the tBtampCtx being passed in. */
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH,
"WLAN BAP Context Monitor: btampContext value = %x in %s:%d", btampContext, __FUNCTION__, __LINE__ );
#endif //BAP_DEBUG
/* Loop disconnecting all Logical Links on this Physical Link */
for (i = 0 ; i < WLANBAP_MAX_LOG_LINKS; i++)
{
pLogLinkContext = &(btampContext->btampLogLinkCtx[i]);
if (pLogLinkContext->present == VOS_TRUE)
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
"WLAN BAP: Deleting logical link entry %d in %s", i,
__FUNCTION__);
/* Mark this Logical Link index value as free */
pLogLinkContext->present = VOS_FALSE;
// signalHCIDiscLogLink(status = SUCCESS, reason = CONNECTION_TERM_BY_REMOTE_HOST);
signalHCIDiscLogLinkCompEvent
( btampContext,
WLANBAP_STATUS_SUCCESS,
i, // logical link
// I don't know how to signal CONNECTION_TERM_BY_REMOTE_HOST
WLANBAP_ERROR_TERM_BY_LOCAL_HOST);
}
}
/*Reset current_log_link_index and total_log_link_index values*/
btampContext->current_log_link_index = 0;
btampContext->total_log_link_index = 0;
/* Format the Physical Link Disconnect Complete event to return... */
bapHCIEvent.bapHCIEventCode = BTAMP_TLV_HCI_DISCONNECT_PHYSICAL_LINK_COMPLETE_EVENT;
bapHCIEvent.u.btampDisconnectPhysicalLinkCompleteEvent.present = 1;
bapHCIEvent.u.btampDisconnectPhysicalLinkCompleteEvent.status = status;
bapHCIEvent.u.btampDisconnectPhysicalLinkCompleteEvent.reason = reason;//uncommented to debug
bapHCIEvent.u.btampDisconnectPhysicalLinkCompleteEvent.phy_link_handle
= btampContext->phy_link_handle;
/* Stop the Tx packet monitoring timer */
WLANBAP_StopTxPacketMonitorTimer(btampContext);
/* Need to clean up the phy link handle as we are disconnected at this
point. ?? - do we need to do any more cleanup on this? */
btampContext->phy_link_handle = 0;
vosStatus = (*btampContext->pBapHCIEventCB)
(
btampContext->pHddHdl, /* this refers the BSL per application context */
&bapHCIEvent, /* This now encodes ALL event types */
VOS_TRUE /* Flag to indicate assoc-specific event */
);
return vosStatus;
} /* signalHCIPhysLinkDiscEvent */
/* the HCI Channel Select Event signalling routine*/
VOS_STATUS
signalHCIChanSelEvent
(
ptBtampContext btampContext /* btampContext value */
)
{
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
tBtampHCI_Event bapHCIEvent; /* This now encodes ALL event types */
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/* Format the Channel Selected event to return... */
bapHCIEvent.bapHCIEventCode = BTAMP_TLV_HCI_CHANNEL_SELECTED_EVENT;
bapHCIEvent.u.btampChannelSelectedEvent.present = 1;
bapHCIEvent.u.btampChannelSelectedEvent.phy_link_handle
= btampContext->phy_link_handle;
vosStatus = (*btampContext->pBapHCIEventCB)
(
btampContext->pHddHdl, /* this refers the BSL per application context */
&bapHCIEvent, /* This now encodes ALL event types */
VOS_TRUE /* Flag to indicate assoc-specific event */
);
return vosStatus;
} /* signalHCIChanSelEvent */
/* the HCI Disconnect Logical Link Complete Event signalling routine*/
VOS_STATUS
signalHCIDiscLogLinkCompEvent
(
ptBtampContext btampContext, /* btampContext value */
v_U8_t status, /* the BT-AMP status */
v_U16_t log_link_handle, /* The Logical Link that disconnected*/
v_U8_t reason /* the BT-AMP reason code */
)
{
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
tBtampHCI_Event bapHCIEvent; /* This now encodes ALL event types */
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/* Format the Logical Link Disconnect Complete event to return... */
bapHCIEvent.bapHCIEventCode = BTAMP_TLV_HCI_DISCONNECT_LOGICAL_LINK_COMPLETE_EVENT;
bapHCIEvent.u.btampDisconnectLogicalLinkCompleteEvent.present = 1;
bapHCIEvent.u.btampDisconnectLogicalLinkCompleteEvent.status = status;
bapHCIEvent.u.btampDisconnectLogicalLinkCompleteEvent.reason = reason;
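/* The 16-bit HCI logical link handle below packs the logical link index
into the upper byte and the physical link handle into the lower byte. */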
bapHCIEvent.u.btampDisconnectLogicalLinkCompleteEvent.log_link_handle
= (log_link_handle << 8) + btampContext->phy_link_handle;
vosStatus = (*btampContext->pBapHCIEventCB)
(
btampContext->pHddHdl, /* this refers the BSL per application context */
&bapHCIEvent, /* This now encodes ALL event types */
VOS_TRUE /* Flag to indicate assoc-specific event */
);
return vosStatus;
} /* signalHCIDiscLogLinkCompEvent */
// These are needed to recognize RSN suite types
#define WLANBAP_RSN_OUI_SIZE 4
tANI_U8 pRSNOui00[ WLANBAP_RSN_OUI_SIZE ] = { 0x00, 0x0F, 0xAC, 0x00 }; // group cipher
tANI_U8 pRSNOui01[ WLANBAP_RSN_OUI_SIZE ] = { 0x00, 0x0F, 0xAC, 0x01 }; // WEP-40 or RSN
tANI_U8 pRSNOui02[ WLANBAP_RSN_OUI_SIZE ] = { 0x00, 0x0F, 0xAC, 0x02 }; // TKIP or RSN-PSK
tANI_U8 pRSNOui03[ WLANBAP_RSN_OUI_SIZE ] = { 0x00, 0x0F, 0xAC, 0x03 }; // Reserved
tANI_U8 pRSNOui04[ WLANBAP_RSN_OUI_SIZE ] = { 0x00, 0x0F, 0xAC, 0x04 }; // AES-CCMP
tANI_U8 pRSNOui05[ WLANBAP_RSN_OUI_SIZE ] = { 0x00, 0x0F, 0xAC, 0x05 }; // WEP-104
#define GET_IE_LEN_IN_BSS(lenInBss) ( lenInBss + sizeof(lenInBss) - ((int) OFFSET_OF( tSirBssDescription, ieFields)))
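/* GET_IE_LEN_IN_BSS computes the IE byte count from a BSS descriptor: the
descriptor's reported length, plus the size of the length field itself,
minus the offset of the ieFields member within tSirBssDescription. */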
/* Incoming Association indication validation predicate */
v_U32_t
validAssocInd
(
ptBtampContext btampContext, /* btampContext value */
tCsrRoamInfo *pRoamInfo
)
{
/* tHalHandle */
tHalHandle hHal = VOS_GET_HAL_CB(btampContext->pvosGCtx);
v_U32_t ieLen;
/* For now, always return true. N.B.: this early return makes the remaining
validation code in this function unreachable. */
return VOS_TRUE;
/* Check for a valid peer MAC address */
/* For an incoming Assoc Indication, the peer MAC address
* should match the value that the BlueTooth AMP
* configured us with.
*/
if ( !vos_mem_compare( btampContext->peer_mac_addr,
pRoamInfo->peerMac,
sizeof(btampContext->peer_mac_addr) ))
{
/* Return not valid */
return VOS_FALSE;
}
/* JEZ081115: For now, ignore the RSN IE */
/* Otherwise, it is valid */
return VOS_TRUE;
/* Check for a trivial case: IEs missing */
if( pRoamInfo->prsnIE == NULL )
{
//btampContext->ieFields = NULL;
//btampContext->ieLen = 0;
/* Return not valid */
return VOS_FALSE;
}
//btampContext->ieLen = GET_IE_LEN_IN_BSS( pBssDesc->length );
//ieLen = GET_IE_LEN_IN_BSS( pBssDesc->length );
ieLen = pRoamInfo->rsnIELen;
/* Check for a trivial case: IEs zero length */
//if( btampContext->ieLen == 0 )
if( ieLen == 0 )
{
//btampContext->ieFields = NULL;
//btampContext->ieLen = 0;
/* Return not valid */
return VOS_FALSE;
}
{
// --- Start of block ---
tDot11fBeaconIEs dot11BeaconIEs;
tDot11fIESSID *pDot11SSID;
tDot11fIERSN *pDot11RSN;
// JEZ081215: This really needs to be updated to just validate the RSN IE.
// Validating the SSID can be done directly from...
// "Unpack" really wants tpAniSirGlobal (pMac) as its first param.
// But since it isn't used, I just pass in some arbitrary "context" pointer.
// So hHalHandle will make it happy.
dot11fUnpackBeaconIEs((tpAniSirGlobal) hHal,
(tANI_U8 *) pRoamInfo->prsnIE,
ieLen,
&dot11BeaconIEs);
//DUMPLOG(9, __FUNCTION__, "dot11BeaconIEs", &dot11BeaconIEs, 64);
pDot11SSID = &dot11BeaconIEs.SSID;
// Assume there wasn't an SSID in the Assoc Request
btampContext->assocSsidLen = 0;
if (pDot11SSID->present )
{
//DUMPLOG(10, __FUNCTION__, "pDot11SSID present", pDot11SSID, 64);
btampContext->assocSsidLen = pDot11SSID->num_ssid;
vos_mem_copy(btampContext->assocSsid,
pDot11SSID->ssid,
btampContext->assocSsidLen );
}
else
return VOS_FALSE;
// Check the validity of the SSID against our SSID value
if ( !vos_mem_compare( btampContext->ownSsid,
pDot11SSID->ssid,
btampContext->ownSsidLen ))
{
/* Return not valid */
return VOS_FALSE;
}
pDot11RSN = &dot11BeaconIEs.RSN;
// Assume there wasn't an RSN IE in the Assoc Request
//btampContext->assocRsnIeLen = 0;
if (pDot11RSN->present )
{
//DUMPLOG(10, __FUNCTION__, "pDot11RSN present", pDot11RSN, 64);
//The 802.11 BT-AMP PAL only supports WPA2-PSK
if (!vos_mem_compare(pRSNOui02, // RSN-PSK
pDot11RSN->akm_suites[0],
WLANBAP_RSN_OUI_SIZE))
return VOS_FALSE;
//The 802.11 BT-AMP PAL only supports AES-CCMP Unicast
if (!vos_mem_compare(pRSNOui04, // AES-CCMP
pDot11RSN->pwise_cipher_suites[0],
WLANBAP_RSN_OUI_SIZE))
return VOS_FALSE;
}
else
return VOS_FALSE;
} // --- End of block ---
/* Otherwise, it is valid */
return VOS_TRUE;
} /* validAssocInd */
/* the change state function*/
void
btampfsmChangeToState
(
BTAMPFSM_INSTANCEDATA_T *instance,
BTAMPFSM_STATES_T state
)
{
instance->stateVar = state;
//BTAMPFSM_ENTRY_FLAG_T disconnectedEntry;
}
/* Physical Link state machine function */
//int
VOS_STATUS
btampFsm
(
//BTAMPFSM_INSTANCEDATA_T *instanceVar
ptBtampContext btampContext, /* btampContext value */
// tBtampSessCtx *tpBtampSessCtx, /* btampContext value */
ptWLAN_BAPEvent bapEvent, /* State machine event */
v_U8_t *status /* return the BT-AMP status here */
)
{
/* Retrieve the phy link state machine structure
* from the btampContext value
*/
BTAMPFSM_INSTANCEDATA_T *instanceVar;
v_U32_t msg = bapEvent->event; /* State machine input event message */
v_U32_t channel; /* Current channel */
v_U32_t activeFlag; /* Channel active flag */
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
ptBtampHandle btampHandle = ( ptBtampHandle)btampContext;
v_U8_t ucSTAId; /* The StaId (used by TL, PE, and HAL) */
v_PVOID_t pHddHdl; /* Handle to return BSL context in */
tHalHandle hHal = NULL;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/* Validate params */
if (btampHandle == NULL)
{
return VOS_STATUS_E_FAULT;
}
instanceVar = &(btampContext->bapPhysLinkMachine);
hHal = VOS_GET_HAL_CB(btampContext->pvosGCtx);
if (NULL == hHal)
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
"hHal is NULL in %s", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
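/* Convenience predicate: true when the current channel cannot be obtained,
i.e. no channel has yet been selected for this physical link */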
#define CHANNEL_NOT_SELECTED (WLANBAP_GetCurrentChannel (btampContext, &channel, &activeFlag) != VOS_STATUS_SUCCESS)
/*Initialize BTAMP PAL status code being returned to the btampFsm caller */
*status = WLANBAP_STATUS_SUCCESS;
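/* Happy-path state flow, as implemented in the cases below:
 * DISCONNECTED -> S1 -> [SCANNING ->] STARTING -> CONNECTING ->
 * AUTHENTICATING (the initiator may instead pass through VALIDATED) ->
 * KEYING -> CONNECTED; DISCONNECTING handles teardown back to DISCONNECTED. */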
switch(instanceVar->stateVar)
{
case DISCONNECTED:
if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_HCI_PHYSICAL_LINK_CREATE))
{
/*Transition from DISCONNECTED to S1 (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "DISCONNECTED", "S1");
#if 0
/* This will have issues in multisession. Need not close the session */
/* TODO : Need to have better handling */
if(btampContext->isBapSessionOpen == TRUE)//We want to close only BT-AMP Session
{
sme_CloseSession(VOS_GET_HAL_CB(btampContext->pvosGCtx),
btampContext->sessionId);
/*Added by Luiza:*/
btampContext->isBapSessionOpen = FALSE;
}
#endif
/* Set BAP device role */
vosStatus = gotoS1( btampContext, bapEvent, BT_INITIATOR, status);
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, cmd status is %d", __FUNCTION__, *status);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,S1);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_HCI_PHYSICAL_LINK_ACCEPT))
{
/*Transition from DISCONNECTED to S1 (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "DISCONNECTED", "S1");
#if 0
if(btampContext->isBapSessionOpen == TRUE)
{
sme_CloseSession(VOS_GET_HAL_CB(btampContext->pvosGCtx),
btampContext->sessionId);
/*Added by Luiza:*/
btampContext->isBapSessionOpen = FALSE;
}
/*Action code for transition */
#endif
/* Set BAP device role */
vosStatus = gotoS1(btampContext, bapEvent, BT_RESPONDER, status);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,S1);
}
else
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, in state %s, invalid event msg %d", __FUNCTION__, "DISCONNECTED", msg);
/* Intentionally left blank */
}
break;
case S1:
if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_HCI_WRITE_REMOTE_AMP_ASSOC
) && (btampContext->BAPDeviceRole == BT_INITIATOR && !(CHANNEL_NOT_SELECTED)))
{
/*Transition from S1 to STARTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "S1", "STARTING");
/*Action code for transition */
vosStatus = determineChan(btampContext, BT_INITIATOR, &channel, status);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,STARTING);
// This has to be commented out until I get the BT-AMP SME/CSR changes
vosStatus = gotoStarting( btampContext, bapEvent, eCSR_BSS_TYPE_WDS_AP, status);
if (VOS_STATUS_SUCCESS != vosStatus)
{
btampfsmChangeToState(instanceVar, S1);
}
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_TIMER_CONNECT_ACCEPT_TIMEOUT))
{
/*Transition from S1 to DISCONNECTED (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "S1", "DISCONNECTED");
/*Action code for transition */
/* Set everything back as dis-connected */
gotoDisconnected( btampContext);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTED);
/*Signal the disconnect */
signalHCIPhysLinkCompEvent( btampContext, WLANBAP_ERROR_HOST_TIMEOUT);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_HCI_PHYSICAL_LINK_DISCONNECT))
{
/*Transition from S1 to DISCONNECTED (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "S1", "DISCONNECTED");
/*Action code for transition */
gotoDisconnected(btampContext);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTED);
/*Signal the successful physical link disconnect */
signalHCIPhysLinkDiscEvent
( btampContext,
WLANBAP_STATUS_SUCCESS,
WLANBAP_ERROR_TERM_BY_LOCAL_HOST);
/*Signal the unsuccessful physical link creation */
signalHCIPhysLinkCompEvent( btampContext, WLANBAP_ERROR_NO_CNCT );
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_HCI_WRITE_REMOTE_AMP_ASSOC
) && (btampContext->BAPDeviceRole == BT_RESPONDER))
{
/*Transition from S1 to STARTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "S1", "STARTING");
/*Action code for transition */
//determineChan(BT_RESPONDER);
vosStatus = determineChan(btampContext, BT_RESPONDER, &channel, status);
btampfsmChangeToState(instanceVar,STARTING);//Moved to here to debug
// This has to be commented out until I get the BT-AMP SME/CSR changes
/*Advance outer statevar */
// btampfsmChangeToState(instanceVar,STARTING);
vosStatus = gotoStarting( btampContext, bapEvent, eCSR_BSS_TYPE_WDS_STA, status);
if (VOS_STATUS_SUCCESS != vosStatus)
{
btampfsmChangeToState(instanceVar, S1);
}
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_HCI_WRITE_REMOTE_AMP_ASSOC
) && (btampContext->BAPDeviceRole == BT_INITIATOR && CHANNEL_NOT_SELECTED))
{
/*Transition from S1 to SCANNING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "S1", "SCANNING");
/*Action code for transition */
gotoScanning(btampContext, BT_RESPONDER, status);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,SCANNING);
}
else
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, in state %s, invalid event msg %d", __FUNCTION__, "S1", msg);
/* Intentionally left blank */
}
break;
case STARTING:
if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_MAC_START_BSS_SUCCESS
) && (btampContext->BAPDeviceRole == BT_INITIATOR))
{
/*Transition from STARTING to CONNECTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "STARTING", "CONNECTING");
btampfsmChangeToState(instanceVar,CONNECTING);//Moved to debug
/*Set the selected channel */
/*should have been already set */
btampContext->channel = ( 0 == btampContext->channel )?1:btampContext->channel;
/*Action code for transition */
signalHCIChanSelEvent(btampContext);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_HCI_PHYSICAL_LINK_DISCONNECT))
{
/*Transition from STARTING to DISCONNECTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "STARTING", "DISCONNECTING");
/*Action code for transition */
//csrRoamDisconnect();
sme_RoamDisconnect(hHal,
//JEZ081115: Fixme
btampContext->sessionId,
// Danlin, where are the richer reason codes?
// I want to be able to convey everything 802.11 supports...
eCSR_DISCONNECT_REASON_UNSPECIFIED);
gotoDisconnecting(
btampContext,
VOS_TRUE,
WLANBAP_ERROR_NO_CNCT,
//VOS_TRUE, // Should be VOS_FALSE !!!
VOS_FALSE,
WLANBAP_ERROR_TERM_BY_LOCAL_HOST);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTING);
// It is NOT clear that we need to send the Phy Link Disconnect
// Complete Event here.
signalHCIPhysLinkDiscEvent
( btampContext,
WLANBAP_STATUS_SUCCESS,
WLANBAP_ERROR_TERM_BY_LOCAL_HOST);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_CHANNEL_SELECTION_FAILED))
{
/*Transition from STARTING to DISCONNECTED (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "STARTING", "DISCONNECTED");
gotoDisconnected(btampContext);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTED);
/*Action code for transition */
signalHCIPhysLinkCompEvent( btampContext, WLANBAP_ERROR_HOST_REJ_RESOURCES );
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_MAC_START_BSS_SUCCESS
) && (btampContext->BAPDeviceRole == BT_RESPONDER))
{
/*Transition from STARTING to CONNECTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "STARTING", "CONNECTING");
/* Set the selected channel */
/*should have been already set */
btampContext->channel = ( 0 == btampContext->channel )?1:btampContext->channel;
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,CONNECTING);
/*Action code for transition */
gotoConnecting(btampContext);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_TIMER_CONNECT_ACCEPT_TIMEOUT))
{
/*Transition from STARTING to DISCONNECTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "STARTING", "DISCONNECTING");
/*Action code for transition */
//csrRoamDisconnect();
sme_RoamDisconnect(hHal,
//JEZ081115: Fixme
btampContext->sessionId,
eCSR_DISCONNECT_REASON_UNSPECIFIED);
gotoDisconnecting(
btampContext,
VOS_TRUE,
WLANBAP_ERROR_HOST_TIMEOUT,
VOS_FALSE,
0);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTING);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_MAC_START_FAILS))
{
/*Transition from STARTING to DISCONNECTED (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "STARTING", "DISCONNECTED");
/*Action code for transition */
gotoDisconnected(btampContext);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTED);
signalHCIPhysLinkCompEvent( btampContext, WLANBAP_ERROR_MAX_NUM_CNCTS );
}
else
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, in state %s, invalid event msg %d", __FUNCTION__, "STARTING", msg);
/* Intentionally left blank */
}
break;
case CONNECTING:
if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_MAC_CONNECT_COMPLETED
) && (btampContext->BAPDeviceRole == BT_RESPONDER))
{
/*Transition from CONNECTING to AUTHENTICATING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "CONNECTING", "AUTHENTICATING");
//VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "CONNECTING", "CONNECTED");
gotoAuthenticating(btampContext);
/*Action code for transition */
initRsnSupplicant(btampContext, BT_RESPONDER);
#if 1
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,AUTHENTICATING);
#else
/*Action code for transition */
signalHCIPhysLinkCompEvent(btampContext, WLANBAP_STATUS_SUCCESS);
gotoConnected(btampContext);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,CONNECTED);
#endif
/* register our STA with TL */
regStaWithTl (
btampContext, /* btampContext value */
BT_RESPONDER,
(tCsrRoamInfo *)bapEvent->params);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_HCI_PHYSICAL_LINK_DISCONNECT))
{
/*Transition from CONNECTING to DISCONNECTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "CONNECTING", "DISCONNECTING");
/*Action code for transition */
//csrRoamDisconnect();
sme_RoamDisconnect(hHal,
//JEZ081115: Fixme
btampContext->sessionId,
eCSR_DISCONNECT_REASON_UNSPECIFIED);
gotoDisconnecting(
btampContext,
VOS_TRUE,
WLANBAP_ERROR_NO_CNCT,
//VOS_TRUE, // Should be VOS_FALSE !!!
VOS_FALSE,
WLANBAP_ERROR_TERM_BY_LOCAL_HOST);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTING);
// It is NOT clear that we need to send the Phy Link Disconnect
// Complete Event here.
signalHCIPhysLinkDiscEvent
( btampContext,
WLANBAP_STATUS_SUCCESS,
WLANBAP_ERROR_TERM_BY_LOCAL_HOST);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_MAC_CONNECT_INDICATION
//) && (bssDesc indicates an invalid peer MAC Addr or SecParam)){
) && !validAssocInd(btampContext, (tCsrRoamInfo *)bapEvent->params))
{
/*Transition from CONNECTING to DISCONNECTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "CONNECTING", "DISCONNECTING");
/*Action code for transition */
//csrRoamDisconnect(DEAUTH);
//JEZ081120: Danlin points out that I could just ignore this
sme_RoamDisconnect(hHal,
//JEZ081115: Fixme
btampContext->sessionId,
eCSR_DISCONNECT_REASON_DEAUTH);
//eCSR_DISCONNECT_REASON_UNSPECIFIED);
gotoDisconnecting(
btampContext,
VOS_TRUE,
WLANBAP_ERROR_AUTHENT_FAILURE,
VOS_FALSE,
0);
/*Set the status code being returned to the btampFsm caller */
*status = WLANBAP_ERROR_AUTHENT_FAILURE;
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTING);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_MAC_CONNECT_INDICATION
//) && (bssDesc indicates a valid MAC Addr and SecParam)){
) && validAssocInd(btampContext, (tCsrRoamInfo *)bapEvent->params))
{
/*Transition from CONNECTING to VALIDATED (both without substates)*/
//VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "CONNECTING", "VALIDATED");
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "CONNECTING", "AUTHENTICATING");
//VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "CONNECTING", "CONNECTED");
/*Action code for transition */
// JEZ081027: This one is a pain. Since we are responding in the
// callback itself. This messes up my state machine.
//csrRoamAccept();
// No! This is fine.
/*Set the status code being returned to the btampFsm caller */
*status = WLANBAP_STATUS_SUCCESS;
/* JEZ081215: N.B.: Currently, I don't get the
* eCSR_ROAM_RESULT_WDS_ASSOCIATED as an AP.
* So, I have to register with TL, here. This
* seems weird.
*/
/* register our STA with TL */
regStaWithTl (
btampContext, /* btampContext value */
BT_INITIATOR,
(tCsrRoamInfo *)bapEvent->params );
gotoAuthenticating(btampContext);
/*Action code for transition */
initRsnAuthenticator(btampContext, BT_INITIATOR);
#if 1
/*Advance outer statevar */
//btampfsmChangeToState(instanceVar,VALIDATED);
btampfsmChangeToState(instanceVar,AUTHENTICATING);
#else
/*Action code for transition */
signalHCIPhysLinkCompEvent(btampContext, WLANBAP_STATUS_SUCCESS);
gotoConnected(btampContext);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,CONNECTED);
#endif
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_MAC_CONNECT_FAILED))
{
/*Transition from CONNECTING to DISCONNECTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "CONNECTING", "DISCONNECTING");
/*Action code for transition */
sme_RoamDisconnect(hHal,
btampContext->sessionId,
eCSR_DISCONNECT_REASON_UNSPECIFIED);
/* Section 3.1.8 and section 3.1.9 have contradictory semantics for 0x16.
* 3.1.8 is "connection terminated by local host". 3.1.9 is "failed connection".
*/
//gotoDisconnecting(FAILED_CONNECTION);
gotoDisconnecting(
btampContext,
VOS_TRUE,
WLANBAP_ERROR_TERM_BY_LOCAL_HOST, //FAILED_CONNECTION
VOS_FALSE,
0);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTING);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_TIMER_CONNECT_ACCEPT_TIMEOUT))
{
/*Transition from CONNECTING to DISCONNECTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "CONNECTING", "DISCONNECTING");
/*Action code for transition */
//csrRoamDisconnect();
sme_RoamDisconnect(hHal,
//JEZ081115: Fixme
btampContext->sessionId,
eCSR_DISCONNECT_REASON_UNSPECIFIED);
gotoDisconnecting(
btampContext,
VOS_TRUE,
WLANBAP_ERROR_HOST_TIMEOUT,
VOS_FALSE,
0);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTING);
}
else
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, in state %s, invalid event msg %d", __FUNCTION__, "CONNECTING", msg);
/* Intentionally left blank */
}
break;
case AUTHENTICATING:
if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_RSN_SUCCESS
) && (btampContext->BAPDeviceRole == BT_RESPONDER))
{
/*Transition from AUTHENTICATING to KEYING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "AUTHENTICATING", "KEYING");
/*Action code for transition */
//sme_RoamSetContext();
#if 0
sme_RoamSetKey(
VOS_GET_HAL_CB(btampContext->pvosGCtx),
btampContext->sessionId,
tSirMacAddr peerBssId,
eCsrEncryptionType encryptType,
tANI_U16 keyLength,
tANI_U8 *pKey,
VOS_TRUE, // TRUE
tANI_U8 paeRole);
#endif //0
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,KEYING);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_RSN_SUCCESS
) && (btampContext->BAPDeviceRole == BT_INITIATOR))
{
/*Transition from AUTHENTICATING to KEYING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "AUTHENTICATING", "KEYING");
/*Action code for transition */
//sme_RoamSetContext();
#if 0
sme_RoamSetKey(
VOS_GET_HAL_CB(btampContext->pvosGCtx),
btampContext->sessionId,
tSirMacAddr peerBssId,
eCsrEncryptionType encryptType,
tANI_U16 keyLength,
tANI_U8 *pKey,
VOS_TRUE, // TRUE
tANI_U8 paeRole);
#endif //0
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,KEYING);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_TIMER_CONNECT_ACCEPT_TIMEOUT))
{
/*Transition from AUTHENTICATING to DISCONNECTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s ConnectAcceptTimeout", __FUNCTION__, "AUTHENTICATING", "DISCONNECTING");
gotoDisconnecting(
btampContext,
VOS_TRUE,
WLANBAP_ERROR_HOST_TIMEOUT,
VOS_FALSE,
0);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTING);
/*Action code for transition */
sme_RoamDisconnect(hHal,
//JEZ081115: Fixme
btampContext->sessionId,
eCSR_DISCONNECT_REASON_UNSPECIFIED);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_HCI_PHYSICAL_LINK_DISCONNECT))
{
/*Transition from AUTHENTICATING to DISCONNECTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s Physicallink Disconnect", __FUNCTION__, "AUTHENTICATING", "DISCONNECTING");
/*Action code for transition */
//csrRoamDisconnect();
sme_RoamDisconnect(hHal,
//JEZ081115: Fixme
btampContext->sessionId,
eCSR_DISCONNECT_REASON_UNSPECIFIED);
gotoDisconnecting(
btampContext,
VOS_TRUE,
WLANBAP_ERROR_NO_CNCT,
//VOS_TRUE, // Should be VOS_FALSE !!!
VOS_FALSE,
WLANBAP_ERROR_TERM_BY_LOCAL_HOST);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTING);
// It is NOT clear that we need to send the Phy Link Disconnect
// Complete Event here.
signalHCIPhysLinkDiscEvent
( btampContext,
WLANBAP_STATUS_SUCCESS,
WLANBAP_ERROR_TERM_BY_LOCAL_HOST);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_RSN_FAILURE))
{
/*Transition from AUTHENTICATING to DISCONNECTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s RSN Failure", __FUNCTION__, "AUTHENTICATING", "DISCONNECTING");
/*Action code for transition */
//csrRoamDisconnect(DEAUTH);
sme_RoamDisconnect(hHal,
//JEZ081115: Fixme
btampContext->sessionId,
eCSR_DISCONNECT_REASON_DEAUTH);
//eCSR_DISCONNECT_REASON_UNSPECIFIED);
gotoDisconnecting(
btampContext,
VOS_TRUE,
WLANBAP_ERROR_AUTHENT_FAILURE,
VOS_FALSE,
0);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTING);
}
else
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, in state %s, invalid event msg %d", __FUNCTION__, "AUTHENTICATING", msg);
/* Intentionally left blank */
}
break;
case CONNECTED:
if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_HCI_PHYSICAL_LINK_DISCONNECT))
{
/*Transition from CONNECTED to DISCONNECTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "CONNECTED", "DISCONNECTING");
gotoDisconnecting(
btampContext,
VOS_FALSE,
0,
VOS_TRUE,
WLANBAP_ERROR_TERM_BY_LOCAL_HOST);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTING);
WLANBAP_DeInitLinkSupervision(( ptBtampHandle)btampContext);
/*Action code for transition */
//csrRoamDisconnect();
sme_RoamDisconnect(hHal,
//JEZ081115: Fixme
btampContext->sessionId,
eCSR_DISCONNECT_REASON_UNSPECIFIED);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_MAC_INDICATES_MEDIA_DISCONNECTION))
{
/*Transition from CONNECTED to DISCONNECTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "CONNECTED", "DISCONNECTING");
WLANBAP_DeInitLinkSupervision(( ptBtampHandle)btampContext);
gotoDisconnecting(
btampContext,
VOS_FALSE,
0,
VOS_TRUE,
WLANBAP_ERROR_TERM_BY_LOCAL_HOST);
/*Action code for transition */
sme_RoamDisconnect(hHal,
btampContext->sessionId,
eCSR_DISCONNECT_REASON_UNSPECIFIED);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTING);
}
else
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, in state %s, invalid event msg %d", __FUNCTION__, "CONNECTED", msg);
/* Intentionally left blank */
}
break;
/* JEZ081107: This will only work if I have already signalled the disconnect complete
* event in every case where a physical link complete event is required. And a
* disconnect was requested.
* - - -
* And only if I check for gNeedPhysLinkCompEvent BEFORE I check gDiscRequested.
* Naw! Not necessary.
*/
case DISCONNECTING:
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, Entered DISCONNECTING:", __FUNCTION__);//Debug statement
if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_MAC_READY_FOR_CONNECTIONS
) && (btampContext->gDiscRequested == VOS_TRUE))
{
/*Transition from DISCONNECTING to DISCONNECTED (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "DISCONNECTING", "DISCONNECTED");
//Clear gDiscRequested;
btampContext->gDiscRequested = VOS_FALSE;
if(btampContext->BAPDeviceRole == BT_INITIATOR)
{
if(!VOS_IS_STATUS_SUCCESS(vos_lock_acquire(&btampContext->bapLock)))
{
VOS_TRACE(VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,"btampFsm, Get LOCK Fail");
}
authRsnFsmFree(btampContext);
if(!VOS_IS_STATUS_SUCCESS(vos_lock_release(&btampContext->bapLock)))
{
VOS_TRACE(VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,"btampFsm, Release LOCK Fail");
}
}
else if(btampContext->BAPDeviceRole == BT_RESPONDER)
{
suppRsnFsmFree(btampContext);
}
/* Lookup the StaId using the phy_link_handle and the BAP context */
vosStatus = WLANBAP_GetStaIdFromLinkCtx (
btampHandle, /* btampHandle value in */
btampContext->phy_link_handle, /* phy_link_handle value in */
&ucSTAId, /* The StaId (used by TL, PE, and HAL) */
&pHddHdl); /* Handle to return BSL context */
if ( VOS_STATUS_SUCCESS != vosStatus )
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO,
"Unable to retrieve STA Id from BAP context and phy_link_handle in %s", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
WLANTL_ClearSTAClient(btampContext->pvosGCtx, ucSTAId);
// gotoDisconnected(btampContext);
// VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s:In DISCONNECTING-changing outer state var to DISCONNECTED", __FUNCTION__);
/*Advance outer statevar */
// btampfsmChangeToState(instanceVar,DISCONNECTED);
signalHCIPhysLinkDiscEvent
( btampContext,
WLANBAP_STATUS_SUCCESS,
btampContext->gDiscReason);
/*sme_CloseSession(VOS_GET_HAL_CB(btampContext->pvosGCtx),
btampContext->sessionId);*/
/*Action code for transition */
gotoDisconnected(btampContext);
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s:In DISCONNECTING-changing outer state var to DISCONNECTED", __FUNCTION__);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTED);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_MAC_READY_FOR_CONNECTIONS
) && (btampContext->gNeedPhysLinkCompEvent == VOS_TRUE))
{
/*Transition from DISCONNECTING to DISCONNECTED (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s gNeedPhysLinkComp TRUE", __FUNCTION__, "DISCONNECTING", "DISCONNECTED");
if(btampContext->BAPDeviceRole == BT_INITIATOR)
{
if(!VOS_IS_STATUS_SUCCESS(vos_lock_acquire(&btampContext->bapLock)))
{
VOS_TRACE(VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,"btampFsm, Get LOCK Fail");
}
authRsnFsmFree(btampContext);
if(!VOS_IS_STATUS_SUCCESS(vos_lock_release(&btampContext->bapLock)))
{
VOS_TRACE(VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,"btampFsm, Release LOCK Fail");
}
}
else if(btampContext->BAPDeviceRole == BT_RESPONDER)
{
suppRsnFsmFree(btampContext);
}
/* Lookup the StaId using the phy_link_handle and the BAP context */
vosStatus = WLANBAP_GetStaIdFromLinkCtx (
btampHandle, /* btampHandle value in */
btampContext->phy_link_handle, /* phy_link_handle value in */
&ucSTAId, /* The StaId (used by TL, PE, and HAL) */
&pHddHdl); /* Handle to return BSL context */
if ( VOS_STATUS_SUCCESS != vosStatus )
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO,
"Unable to retrieve STA Id from BAP context and phy_link_handle in %s", __FUNCTION__);
return VOS_STATUS_E_FAULT;
}
WLANTL_ClearSTAClient(btampContext->pvosGCtx, ucSTAId);
/*Action code for transition */
// signalHCIPhysLinkCompEvent(btampContext, WLANBAP_ERROR_NO_CNCT/*btampContext->gPhysLinkStatus*/);
signalHCIPhysLinkCompEvent(btampContext, btampContext->gPhysLinkStatus);
gotoDisconnected(btampContext);
/*sme_CloseSession(VOS_GET_HAL_CB(btampContext->pvosGCtx),
btampContext->sessionId);*/
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTED);
// signalHCIPhysLinkCompEvent(btampContext, btampContext->gPhysLinkStatus);
}
else
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, in state %s, invalid event msg %d", __FUNCTION__, "DISCONNECTING", msg);
/* Intentionally left blank */
}
break;
case KEYING:
if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_TIMER_CONNECT_ACCEPT_TIMEOUT))
{
/*Transition from KEYING to DISCONNECTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "KEYING", "DISCONNECTING");
/*Action code for transition */
//csrRoamDisconnect();
sme_RoamDisconnect(hHal,
//JEZ081115: Fixme
btampContext->sessionId,
eCSR_DISCONNECT_REASON_UNSPECIFIED);
gotoDisconnecting(
btampContext,
VOS_TRUE,
WLANBAP_ERROR_HOST_TIMEOUT,
VOS_FALSE,
0);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTING);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_HCI_PHYSICAL_LINK_DISCONNECT))
{
/*Transition from KEYING to DISCONNECTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "KEYING", "DISCONNECTING");
/*Action code for transition */
//csrRoamDisconnect();
sme_RoamDisconnect(hHal,
//JEZ081115: Fixme
btampContext->sessionId,
eCSR_DISCONNECT_REASON_UNSPECIFIED);
gotoDisconnecting(
btampContext,
VOS_TRUE,
WLANBAP_ERROR_NO_CNCT,
//VOS_TRUE, // Should be VOS_FALSE !!!
VOS_FALSE,
WLANBAP_ERROR_TERM_BY_LOCAL_HOST);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTING);
// It is NOT clear that we need to send the Phy Link Disconnect
// Complete Event here.
signalHCIPhysLinkDiscEvent
( btampContext,
WLANBAP_STATUS_SUCCESS,
WLANBAP_ERROR_TERM_BY_LOCAL_HOST);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_MAC_KEY_SET_SUCCESS))
{
/*Transition from KEYING to CONNECTED (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "KEYING", "CONNECTED");
/*Action code for transition */
gotoConnected(btampContext);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,CONNECTED);
signalHCIPhysLinkCompEvent(btampContext, WLANBAP_STATUS_SUCCESS);
}
else
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, in state %s, invalid event msg %d", __FUNCTION__, "KEYING", msg);
/* Intentionally left blank */
}
break;
case SCANNING:
if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_MAC_SCAN_COMPLETE))
{
/*Transition from SCANNING to STARTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "SCANNING", "STARTING");
/*Action code for transition */
vosStatus = determineChan(btampContext, BT_INITIATOR, &channel, status);
// This has to be commented out until I get the BT-AMP SME/CSR changes
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,STARTING);
vosStatus = gotoStarting( btampContext, bapEvent, eCSR_BSS_TYPE_WDS_AP, status);
if (VOS_STATUS_SUCCESS != vosStatus)
{
btampfsmChangeToState(instanceVar, SCANNING);
}
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_TIMER_CONNECT_ACCEPT_TIMEOUT))
{
/*Transition from SCANNING to DISCONNECTED (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "SCANNING", "DISCONNECTED");
/*Action code for transition */
gotoDisconnected(btampContext);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTED);
signalHCIPhysLinkCompEvent( btampContext, WLANBAP_ERROR_HOST_TIMEOUT);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_HCI_PHYSICAL_LINK_DISCONNECT))
{
/*Transition from SCANNING to DISCONNECTED (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "SCANNING", "DISCONNECTED");
/*Action code for transition */
gotoDisconnected(btampContext);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTED);
signalHCIPhysLinkDiscEvent
( btampContext,
WLANBAP_STATUS_SUCCESS,
WLANBAP_ERROR_TERM_BY_LOCAL_HOST);
signalHCIPhysLinkCompEvent( btampContext, WLANBAP_ERROR_NO_CNCT);
}
else
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, in state %s, invalid event msg %d", __FUNCTION__, "SCANNING", msg);
/* Intentionally left blank */
}
break;
case VALIDATED:
if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_MAC_CONNECT_COMPLETED
) && (btampContext->BAPDeviceRole == BT_INITIATOR))
{
/*Transition from VALIDATED to AUTHENTICATING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "VALIDATED", "AUTHENTICATING");
gotoAuthenticating(btampContext);
/*Action code for transition */
initRsnAuthenticator(btampContext, BT_INITIATOR);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,AUTHENTICATING);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_TIMER_CONNECT_ACCEPT_TIMEOUT))
{
/*Transition from VALIDATED to DISCONNECTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "VALIDATED", "DISCONNECTING");
/*Action code for transition */
//csrRoamDisconnect();
sme_RoamDisconnect(hHal,
//JEZ081115: Fixme
btampContext->sessionId,
eCSR_DISCONNECT_REASON_UNSPECIFIED);
gotoDisconnecting(
btampContext,
VOS_TRUE,
WLANBAP_ERROR_HOST_TIMEOUT,
VOS_FALSE,
0);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTING);
}
else if((msg==(BTAMPFSM_EVENT_T)eWLAN_BAP_HCI_PHYSICAL_LINK_DISCONNECT))
{
/*Transition from VALIDATED to DISCONNECTING (both without substates)*/
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, from state %s => %s", __FUNCTION__, "VALIDATED", "DISCONNECTING");
/*Action code for transition */
//csrRoamDisconnect();
sme_RoamDisconnect(hHal,
//JEZ081115: Fixme
btampContext->sessionId,
eCSR_DISCONNECT_REASON_UNSPECIFIED);
gotoDisconnecting(
btampContext,
VOS_TRUE,
WLANBAP_ERROR_NO_CNCT,
//VOS_TRUE, // Should be VOS_FALSE !!!
VOS_FALSE,
WLANBAP_ERROR_TERM_BY_LOCAL_HOST);
/*Advance outer statevar */
btampfsmChangeToState(instanceVar,DISCONNECTING);
// It is NOT clear that we need to send the Phy Link Disconnect
// Complete Event here.
signalHCIPhysLinkDiscEvent
( btampContext,
WLANBAP_STATUS_SUCCESS,
WLANBAP_ERROR_TERM_BY_LOCAL_HOST);
}
else
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, in state %s, invalid event msg %d", __FUNCTION__, "VALIDATED", msg);
/* Intentionally left blank */
}
break;
default:
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, invalid state %d", __FUNCTION__, instanceVar->stateVar);
/*Intentionally left blank*/
break;
}
return vosStatus;
}
VOS_STATUS btampEstablishLogLink(ptBtampContext btampContext)
{
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
vos_msg_t msg;
tAniBtAmpLogLinkReq *pMsg;
pMsg = vos_mem_malloc(sizeof(tAniBtAmpLogLinkReq));
if ( NULL == pMsg )
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "In %s, failed to allocate mem for req", __FUNCTION__);
return VOS_STATUS_E_NOMEM;
}
pMsg->msgType = pal_cpu_to_be16((tANI_U16)eWNI_SME_BTAMP_LOG_LINK_IND);
pMsg->msgLen = (tANI_U16)sizeof(tAniBtAmpLogLinkReq);
pMsg->sessionId = btampContext->sessionId;
pMsg->btampHandle = btampContext;
msg.type = eWNI_SME_BTAMP_LOG_LINK_IND;
msg.bodyptr = pMsg;
msg.reserved = 0;
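/* On a successful post, ownership of pMsg passes to the SME message queue;
on failure it must be freed here. */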
if(VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MQ_ID_SME, &msg))
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "In %s, failed to post msg to self", __FUNCTION__);
vos_mem_free(pMsg);
vosStatus = VOS_STATUS_E_FAILURE;
}
return vosStatus;
}
void btampEstablishLogLinkHdlr(void* pMsg)
{
tAniBtAmpLogLinkReq *pBtAmpLogLinkReq = (tAniBtAmpLogLinkReq*)pMsg;
ptBtampContext btampContext;
if(pBtAmpLogLinkReq)
{
btampContext = (ptBtampContext)pBtAmpLogLinkReq->btampHandle;
if(NULL != btampContext)
{
vos_sleep( 200 );
WLAN_BAPEstablishLogicalLink(btampContext);
}
else
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "In %s, btampContext is NULL", __FUNCTION__);
return;
}
}
else
{
VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "In %s, pBtAmpLogLinkReq is NULL", __FUNCTION__);
}
return;
}
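/*
 * Call-flow sketch (illustrative): btampEstablishLogLink() posts an
 * eWNI_SME_BTAMP_LOG_LINK_IND message to the SME message queue; when
 * it is delivered, btampEstablishLogLinkHdlr() recovers the BAP
 * context from btampHandle and, after the 200 ms vos_sleep(), calls
 * WLAN_BAPEstablishLogicalLink().
 */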
| gpl-2.0 |
boyan3010/Villec2_ShooterU_Kernel_3.0.X | arch/arm/mach-msm/rpm_log.c | 643 | 10084 | /* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/uaccess.h>
#include <mach/msm_iomap.h>
#include "rpm_log.h"
/* registers in MSM_RPM_LOG_PAGE_INDICES */
enum {
MSM_RPM_LOG_TAIL,
MSM_RPM_LOG_HEAD
};
/* used to 4 byte align message lengths */
#define PADDED_LENGTH(x) (0xFFFFFFFC & ((x) + 3))
/* calculates the character string length of a message of byte length x */
#define PRINTED_LENGTH(x) ((x) * 6 + 3)
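/*
 * Worked example (illustrative, derived from the macros above): a
 * 5 byte message occupies PADDED_LENGTH(5) = 8 bytes in the log, and
 * printing a 4 byte message needs PRINTED_LENGTH(4) = 27 characters:
 * "- " (2) + 4 * "0xXX, " (24) + "\n" (1).
 */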
/* number of ms to wait between checking for new messages in the RPM log */
#define RECHECK_TIME (50)
struct msm_rpm_log_buffer {
char *data;
u32 len;
u32 pos;
u32 max_len;
u32 read_idx;
struct msm_rpm_log_platform_data *pdata;
};
/******************************************************************************
* Internal functions
*****************************************************************************/
static inline u32
msm_rpm_log_read(const struct msm_rpm_log_platform_data *pdata, u32 page,
u32 reg)
{
return readl_relaxed(pdata->reg_base + pdata->reg_offsets[page]
+ reg * 4);
}
/*
* msm_rpm_log_copy() - Copies messages from a volatile circular buffer in
* the RPM's shared memory into a private local buffer
* msg_buffer: pointer to local buffer (string)
* buf_len: length of local buffer in bytes
* read_start_idx: index into shared memory buffer
*
* Return value: number of bytes written to the local buffer
*
* Copies messages stored in a circular buffer in the RPM Message Memory into
* a specified local buffer. The RPM processor is unaware of these reading
* efforts, so care is taken to make sure that messages are valid both before
* and after reading. The RPM processor utilizes a ULog driver to write the
* log. The RPM processor maintains tail and head indices. These correspond
* to the next byte to write into, and the first valid byte, respectively.
* Both indices increase monotonically (except for rollover).
*
* Messages take the form of [(u32)length] [(char)data0,1,...] in which the
* length specifies the number of payload bytes. Messages must be 4 byte
* aligned, so padding is added at the end of a message as needed.
*
* Print format:
* - 0xXX, 0xXX, 0xXX
* - 0xXX
* etc...
*/
static u32 msm_rpm_log_copy(const struct msm_rpm_log_platform_data *pdata,
char *msg_buffer, u32 buf_len, u32 *read_idx)
{
u32 head_idx, tail_idx;
u32 pos = 0;
u32 i = 0;
u32 msg_len;
u32 pos_start;
char temp[4];
tail_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
MSM_RPM_LOG_TAIL);
head_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
MSM_RPM_LOG_HEAD);
/* loop while the remote buffer has valid messages left to read */
while (tail_idx - head_idx > 0 && tail_idx - *read_idx > 0) {
head_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
MSM_RPM_LOG_HEAD);
/* check if the message to be read is valid */
if (tail_idx - *read_idx > tail_idx - head_idx) {
*read_idx = head_idx;
continue;
}
/*
* Ensure that the reported buffer size is within limits of
* known maximum size and that all indices are 4 byte aligned.
* These conditions are required to interact with a ULog buffer
* properly.
*/
if (tail_idx - head_idx > pdata->log_len ||
!IS_ALIGNED((tail_idx | head_idx | *read_idx), 4))
break;
msg_len = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_BUFFER,
(*read_idx >> 2) & pdata->log_len_mask);
/* handle messages that claim to be longer than the log */
if (PADDED_LENGTH(msg_len) > tail_idx - *read_idx - 4)
msg_len = tail_idx - *read_idx - 4;
/* check that the local buffer has enough space for this msg */
if (pos + PRINTED_LENGTH(msg_len) > buf_len)
break;
pos_start = pos;
pos += scnprintf(msg_buffer + pos, buf_len - pos, "- ");
/* copy message payload to local buffer */
for (i = 0; i < msg_len; i++) {
/* read from shared memory 4 bytes at a time */
if (IS_ALIGNED(i, 4))
*((u32 *)temp) = msm_rpm_log_read(pdata,
MSM_RPM_LOG_PAGE_BUFFER,
((*read_idx + 4 + i) >> 2) &
pdata->log_len_mask);
pos += scnprintf(msg_buffer + pos, buf_len - pos,
"0x%02X, ", temp[i & 0x03]);
}
pos += scnprintf(msg_buffer + pos, buf_len - pos, "\n");
head_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
MSM_RPM_LOG_HEAD);
/* roll back if message that was read is not still valid */
if (tail_idx - *read_idx > tail_idx - head_idx)
pos = pos_start;
*read_idx += PADDED_LENGTH(msg_len) + 4;
}
return pos;
}
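/*
 * Illustrative log entry layout (a sketch, not actual RPM contents):
 *
 *   offset 0: u32 length = 3
 *   offset 4: payload 0xAA 0xBB 0xCC, padded to PADDED_LENGTH(3) = 4
 *
 * which msm_rpm_log_copy() renders into the local buffer as
 * "- 0xAA, 0xBB, 0xCC, \n", then advances *read_idx by
 * PADDED_LENGTH(3) + 4 = 8 bytes.
 */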
/*
* msm_rpm_log_file_read() - Reads in log buffer messages then outputs them to a
* user buffer
*
* Return value:
* 0: success
* -ENOMEM: no memory available
* -EINVAL: user buffer null or requested bytes 0
* -EFAULT: user buffer not writeable
* -EAGAIN: no bytes available at the moment
*/
static ssize_t msm_rpm_log_file_read(struct file *file, char __user *bufu,
size_t count, loff_t *ppos)
{
u32 out_len, remaining;
struct msm_rpm_log_platform_data *pdata;
struct msm_rpm_log_buffer *buf;
buf = file->private_data;
if (!buf)
return -ENOMEM;
pdata = buf->pdata;
if (!pdata)
return -EINVAL;
if (!buf->data)
return -ENOMEM;
if (!bufu || count == 0)
return -EINVAL;
if (!access_ok(VERIFY_WRITE, bufu, count))
return -EFAULT;
/* check for more messages if local buffer empty */
if (buf->pos == buf->len) {
buf->pos = 0;
buf->len = msm_rpm_log_copy(pdata, buf->data, buf->max_len,
&(buf->read_idx));
}
if ((file->f_flags & O_NONBLOCK) && buf->len == 0)
return -EAGAIN;
/* loop until new messages arrive */
while (buf->len == 0) {
cond_resched();
if (msleep_interruptible(RECHECK_TIME))
break;
buf->len = msm_rpm_log_copy(pdata, buf->data, buf->max_len,
&(buf->read_idx));
}
out_len = ((buf->len - buf->pos) < count ? buf->len - buf->pos : count);
remaining = __copy_to_user(bufu, &(buf->data[buf->pos]), out_len);
buf->pos += out_len - remaining;
return out_len - remaining;
}
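/*
 * Minimal userspace sketch (illustrative; assumes debugfs is mounted
 * at /sys/kernel/debug and the "rpm_log" node created in probe below):
 *
 *   int fd = open("/sys/kernel/debug/rpm_log", O_RDONLY);
 *   char chunk[256];
 *   ssize_t n;
 *   while ((n = read(fd, chunk, sizeof(chunk))) > 0)
 *           fwrite(chunk, 1, n, stdout);
 *   close(fd);
 */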
/*
* msm_rpm_log_file_open() - Allows a new reader to open the RPM log virtual
* file
*
* One local buffer is kmalloc'ed for each reader, so no resource sharing has
* to take place (besides the read only access to the RPM log buffer).
*
* Return value:
* 0: success
* -ENOMEM: no memory available
*/
static int msm_rpm_log_file_open(struct inode *inode, struct file *file)
{
struct msm_rpm_log_buffer *buf;
struct msm_rpm_log_platform_data *pdata;
pdata = inode->i_private;
if (!pdata)
return -EINVAL;
file->private_data =
kmalloc(sizeof(struct msm_rpm_log_buffer), GFP_KERNEL);
if (!file->private_data) {
pr_err("%s: ERROR kmalloc failed to allocate %zu bytes\n",
__func__, sizeof(struct msm_rpm_log_buffer));
return -ENOMEM;
}
buf = file->private_data;
buf->data = kmalloc(PRINTED_LENGTH(pdata->log_len), GFP_KERNEL);
if (!buf->data) {
kfree(file->private_data);
file->private_data = NULL;
pr_err("%s: ERROR kmalloc failed to allocate %u bytes\n",
__func__, PRINTED_LENGTH(pdata->log_len));
return -ENOMEM;
}
buf->pdata = pdata;
buf->len = 0;
buf->pos = 0;
buf->max_len = PRINTED_LENGTH(pdata->log_len);
buf->read_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
MSM_RPM_LOG_HEAD);
return 0;
}
static int msm_rpm_log_file_close(struct inode *inode, struct file *file)
{
kfree(((struct msm_rpm_log_buffer *)file->private_data)->data);
kfree(file->private_data);
return 0;
}
static const struct file_operations msm_rpm_log_file_fops = {
.owner = THIS_MODULE,
.open = msm_rpm_log_file_open,
.read = msm_rpm_log_file_read,
.release = msm_rpm_log_file_close,
};
static int __devinit msm_rpm_log_probe(struct platform_device *pdev)
{
struct dentry *dent;
struct msm_rpm_log_platform_data *pdata;
pdata = pdev->dev.platform_data;
if (!pdata)
return -EINVAL;
pdata->reg_base = ioremap(pdata->phys_addr_base, pdata->phys_size);
if (!pdata->reg_base) {
pr_err("%s: ERROR could not ioremap: start=%p, len=%u\n",
__func__, (void *) pdata->phys_addr_base,
pdata->phys_size);
return -EBUSY;
}
dent = debugfs_create_file("rpm_log", S_IRUGO, NULL,
pdev->dev.platform_data, &msm_rpm_log_file_fops);
if (!dent) {
pr_err("%s: ERROR debugfs_create_file failed\n", __func__);
return -ENOMEM;
}
platform_set_drvdata(pdev, dent);
pr_notice("%s: OK\n", __func__);
return 0;
}
static int __devexit msm_rpm_log_remove(struct platform_device *pdev)
{
struct dentry *dent;
struct msm_rpm_log_platform_data *pdata;
pdata = pdev->dev.platform_data;
iounmap(pdata->reg_base);
dent = platform_get_drvdata(pdev);
debugfs_remove(dent);
platform_set_drvdata(pdev, NULL);
pr_notice("%s: OK\n", __func__);
return 0;
}
static struct platform_driver msm_rpm_log_driver = {
.probe = msm_rpm_log_probe,
.remove = __devexit_p(msm_rpm_log_remove),
.driver = {
.name = "msm_rpm_log",
.owner = THIS_MODULE,
},
};
static int __init msm_rpm_log_init(void)
{
return platform_driver_register(&msm_rpm_log_driver);
}
static void __exit msm_rpm_log_exit(void)
{
platform_driver_unregister(&msm_rpm_log_driver);
}
module_init(msm_rpm_log_init);
module_exit(msm_rpm_log_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM RPM Log driver");
MODULE_VERSION("1.0");
MODULE_ALIAS("platform:msm_rpm_log");
| gpl-2.0 |
percy-g2/android_kernel_sony_taoshan | arch/arm/mach-msm/qdsp5v2/audio_qcelp.c | 643 | 43119 | /*
* qcelp 13k audio decoder device
*
* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
*
* This code is based in part on audio_mp3.c, which is
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* See the GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org.
*
*/
#include <asm/ioctls.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/earlysuspend.h>
#include <linux/list.h>
#include <linux/android_pmem.h>
#include <linux/slab.h>
#include <linux/msm_audio.h>
#include <linux/memory_alloc.h>
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/qdsp5v2/qdsp5audppmsg.h>
#include <mach/qdsp5v2/qdsp5audplaycmdi.h>
#include <mach/qdsp5v2/qdsp5audplaymsg.h>
#include <mach/qdsp5v2/audpp.h>
#include <mach/qdsp5v2/audio_dev_ctl.h>
#include <mach/qdsp5v2/audio_dev_ctl.h>
#include <mach/debug_mm.h>
#include <mach/msm_memtypes.h>
#define BUFSZ 1094 /* QCELP 13K: holds 600 ms of packet data
(36 * 30 = 1080 bytes) plus 14 bytes of meta-in */
#define BUF_COUNT 2
#define DMASZ (BUFSZ * BUF_COUNT)
#define PCM_BUFSZ_MIN 1624 /* 100 ms worth of PCM data plus
24 bytes of meta-out */
#define PCM_BUF_MAX_COUNT 5
#define AUDDEC_DEC_QCELP 9
#define ROUTING_MODE_FTRT 1
#define ROUTING_MODE_RT 2
/* Decoder status received from AUDPPTASK */
#define AUDPP_DEC_STATUS_SLEEP 0
#define AUDPP_DEC_STATUS_INIT 1
#define AUDPP_DEC_STATUS_CFG 2
#define AUDPP_DEC_STATUS_PLAY 3
#define AUDQCELP_METAFIELD_MASK 0xFFFF0000
#define AUDQCELP_EOS_FLG_OFFSET 0x0A /* Offset from beginning of buffer */
#define AUDQCELP_EOS_FLG_MASK 0x01
#define AUDQCELP_EOS_NONE 0x0 /* No EOS detected */
#define AUDQCELP_EOS_SET 0x1 /* EOS set in meta field */
#define AUDQCELP_EVENT_NUM 10 /* Default number of pre-allocated event pkts */
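/*
 * Buffer accounting (derived from the macros above): the write side
 * uses BUF_COUNT = 2 buffers of BUFSZ = 1094 bytes (DMASZ = 2188 in
 * total); the PCM read side uses up to PCM_BUF_MAX_COUNT = 5 buffers
 * of at least PCM_BUFSZ_MIN = 1624 bytes each.
 */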
struct buffer {
void *data;
unsigned size;
unsigned used; /* bytes in use; for in[] buffers, the actual PCM size the DSP produced */
unsigned addr;
unsigned short mfield_sz; /* only meaningful when the data carries a meta field */
};
#ifdef CONFIG_HAS_EARLYSUSPEND
struct audqcelp_suspend_ctl {
struct early_suspend node;
struct audio *audio;
};
#endif
struct audqcelp_event{
struct list_head list;
int event_type;
union msm_audio_event_payload payload;
};
struct audio {
struct buffer out[BUF_COUNT];
spinlock_t dsp_lock;
uint8_t out_head;
uint8_t out_tail;
uint8_t out_needed; /* number of buffers the dsp is waiting for */
struct mutex lock;
struct mutex write_lock;
wait_queue_head_t write_wait;
/* Host PCM section - START */
struct buffer in[PCM_BUF_MAX_COUNT];
struct mutex read_lock;
wait_queue_head_t read_wait; /* Wait queue for read */
char *read_data; /* pointer to reader buffer */
int32_t read_phys; /* physical address of reader buffer */
uint8_t read_next; /* index to input buffers to be read next */
uint8_t fill_next; /* index to buffer that DSP should be filling */
uint8_t pcm_buf_count; /* number of pcm buffer allocated */
/* Host PCM section - END */
struct msm_adsp_module *audplay;
/* data allocated for various buffers */
char *data;
int32_t phys; /* physical address of write buffer */
void *map_v_read;
void *map_v_write;
int mfield; /* meta field embedded in data */
int rflush; /* Read flush */
int wflush; /* Write flush */
uint8_t opened:1;
uint8_t enabled:1;
uint8_t running:1;
uint8_t stopped:1; /* set when stopped, cleared on flush */
uint8_t pcm_feedback:1; /* set when non-tunnel mode */
uint8_t buf_refresh:1;
int teos; /* valid only if tunnel mode & no data left for decoder */
enum msm_aud_decoder_state dec_state; /* Represents decoder state */
const char *module_name;
unsigned queue_id;
uint16_t dec_id;
int16_t source;
#ifdef CONFIG_HAS_EARLYSUSPEND
struct audqcelp_suspend_ctl suspend_ctl;
#endif
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
#endif
wait_queue_head_t wait;
struct list_head free_event_queue;
struct list_head event_queue;
wait_queue_head_t event_wait;
spinlock_t event_queue_lock;
struct mutex get_event_lock;
int event_abort;
/* AV sync Info */
int avsync_flag; /* Flag to indicate feedback from DSP */
wait_queue_head_t avsync_wait;/* Wait queue for AV Sync Message */
/* flags, 48 bits sample/bytes counter per channel */
uint16_t avsync[AUDPP_AVSYNC_CH_COUNT * AUDPP_AVSYNC_NUM_WORDS + 1];
uint32_t device_events;
int eq_enable;
int eq_needs_commit;
struct audpp_cmd_cfg_object_params_eqalizer eq;
struct audpp_cmd_cfg_object_params_volume vol_pan;
};
static int auddec_dsp_config(struct audio *audio, int enable);
static void audpp_cmd_cfg_adec_params(struct audio *audio);
static void audpp_cmd_cfg_routing_mode(struct audio *audio);
static void audqcelp_send_data(struct audio *audio, unsigned needed);
static void audqcelp_config_hostpcm(struct audio *audio);
static void audqcelp_buffer_refresh(struct audio *audio);
static void audqcelp_dsp_event(void *private, unsigned id, uint16_t *msg);
#ifdef CONFIG_HAS_EARLYSUSPEND
static void audqcelp_post_event(struct audio *audio, int type,
union msm_audio_event_payload payload);
#endif
/* must be called with audio->lock held */
static int audqcelp_enable(struct audio *audio)
{
MM_DBG("\n"); /* Macro prints the file name and function */
if (audio->enabled)
return 0;
audio->dec_state = MSM_AUD_DECODER_STATE_NONE;
audio->out_tail = 0;
audio->out_needed = 0;
if (msm_adsp_enable(audio->audplay)) {
MM_ERR("msm_adsp_enable(audplay) failed\n");
return -ENODEV;
}
if (audpp_enable(audio->dec_id, audqcelp_dsp_event, audio)) {
MM_ERR("audpp_enable() failed\n");
msm_adsp_disable(audio->audplay);
return -ENODEV;
}
audio->enabled = 1;
return 0;
}
static void qcelp_listner(u32 evt_id, union auddev_evt_data *evt_payload,
void *private_data)
{
struct audio *audio = (struct audio *) private_data;
switch (evt_id) {
case AUDDEV_EVT_DEV_RDY:
MM_DBG(":AUDDEV_EVT_DEV_RDY\n");
audio->source |= (0x1 << evt_payload->routing_id);
if (audio->running == 1 && audio->enabled == 1)
audpp_route_stream(audio->dec_id, audio->source);
break;
case AUDDEV_EVT_DEV_RLS:
MM_DBG(":AUDDEV_EVT_DEV_RLS\n");
audio->source &= ~(0x1 << evt_payload->routing_id);
if (audio->running == 1 && audio->enabled == 1)
audpp_route_stream(audio->dec_id, audio->source);
break;
case AUDDEV_EVT_STREAM_VOL_CHG:
audio->vol_pan.volume = evt_payload->session_vol;
MM_DBG(":AUDDEV_EVT_STREAM_VOL_CHG, stream vol %d\n",
audio->vol_pan.volume);
if (audio->running)
audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan,
POPP);
break;
default:
MM_ERR(":ERROR:wrong event\n");
break;
}
}
/* must be called with audio->lock held */
static int audqcelp_disable(struct audio *audio)
{
int rc = 0;
MM_DBG("\n"); /* Macro prints the file name and function */
if (audio->enabled) {
audio->enabled = 0;
audio->dec_state = MSM_AUD_DECODER_STATE_NONE;
auddec_dsp_config(audio, 0);
rc = wait_event_interruptible_timeout(audio->wait,
audio->dec_state != MSM_AUD_DECODER_STATE_NONE,
msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS));
if (rc == 0)
rc = -ETIMEDOUT;
else if (audio->dec_state != MSM_AUD_DECODER_STATE_CLOSE)
rc = -EFAULT;
else
rc = 0;
wake_up(&audio->write_wait);
wake_up(&audio->read_wait);
msm_adsp_disable(audio->audplay);
audpp_disable(audio->dec_id, audio);
audio->out_needed = 0;
}
return rc;
}
/* ------------------- dsp --------------------- */
static void audqcelp_update_pcm_buf_entry(struct audio *audio,
uint32_t *payload)
{
uint8_t index;
unsigned long flags;
if (audio->rflush)
return;
spin_lock_irqsave(&audio->dsp_lock, flags);
for (index = 0; index < payload[1]; index++) {
if (audio->in[audio->fill_next].addr ==
payload[2 + index * 2]) {
MM_DBG("in[%d] ready\n", audio->fill_next);
audio->in[audio->fill_next].used =
payload[3 + index * 2];
if ((++audio->fill_next) == audio->pcm_buf_count)
audio->fill_next = 0;
} else {
MM_ERR("expected=%x ret=%x\n",
audio->in[audio->fill_next].addr,
payload[2 + index * 2]);
break;
}
}
if (audio->in[audio->fill_next].used == 0) {
audqcelp_buffer_refresh(audio);
} else {
MM_DBG("read cannot keep up\n");
audio->buf_refresh = 1;
}
wake_up(&audio->read_wait);
spin_unlock_irqrestore(&audio->dsp_lock, flags);
}
static void audplay_dsp_event(void *data, unsigned id, size_t len,
void (*getevent) (void *ptr, size_t len))
{
struct audio *audio = data;
uint32_t msg[28];
getevent(msg, sizeof(msg));
MM_DBG("msg_id=%x\n", id);
switch (id) {
case AUDPLAY_MSG_DEC_NEEDS_DATA:
audqcelp_send_data(audio, 1);
break;
case AUDPLAY_MSG_BUFFER_UPDATE:
audqcelp_update_pcm_buf_entry(audio, msg);
break;
case ADSP_MESSAGE_ID:
MM_DBG("Received ADSP event: module enable(audplaytask)\n");
break;
default:
MM_ERR("unexpected message from decoder \n");
}
}
static void audqcelp_dsp_event(void *private, unsigned id, uint16_t *msg)
{
struct audio *audio = private;
switch (id) {
case AUDPP_MSG_STATUS_MSG:{
unsigned status = msg[1];
switch (status) {
case AUDPP_DEC_STATUS_SLEEP: {
uint16_t reason = msg[2];
MM_DBG("decoder status:sleep reason = \
0x%04x\n", reason);
if ((reason == AUDPP_MSG_REASON_MEM)
|| (reason ==
AUDPP_MSG_REASON_NODECODER)) {
audio->dec_state =
MSM_AUD_DECODER_STATE_FAILURE;
wake_up(&audio->wait);
} else if (reason == AUDPP_MSG_REASON_NONE) {
/* decoder is in disable state */
audio->dec_state =
MSM_AUD_DECODER_STATE_CLOSE;
wake_up(&audio->wait);
}
break;
}
case AUDPP_DEC_STATUS_INIT:
MM_DBG("decoder status: init \n");
if (audio->pcm_feedback)
audpp_cmd_cfg_routing_mode(audio);
else
audpp_cmd_cfg_adec_params(audio);
break;
case AUDPP_DEC_STATUS_CFG:
MM_DBG("decoder status: cfg \n");
break;
case AUDPP_DEC_STATUS_PLAY:
MM_DBG("decoder status: play \n");
/* send mixer command */
audpp_route_stream(audio->dec_id,
audio->source);
if (audio->pcm_feedback) {
audqcelp_config_hostpcm(audio);
audqcelp_buffer_refresh(audio);
}
audio->dec_state =
MSM_AUD_DECODER_STATE_SUCCESS;
wake_up(&audio->wait);
break;
default:
MM_ERR("unknown decoder status\n");
}
break;
}
case AUDPP_MSG_CFG_MSG:
if (msg[0] == AUDPP_MSG_ENA_ENA) {
MM_DBG("CFG_MSG ENABLE\n");
auddec_dsp_config(audio, 1);
audio->out_needed = 0;
audio->running = 1;
audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan,
POPP);
audpp_dsp_set_eq(audio->dec_id, audio->eq_enable,
&audio->eq, POPP);
} else if (msg[0] == AUDPP_MSG_ENA_DIS) {
MM_DBG("CFG_MSG DISABLE\n");
audio->running = 0;
} else {
MM_DBG("CFG_MSG %d?\n", msg[0]);
}
break;
case AUDPP_MSG_ROUTING_ACK:
MM_DBG("ROUTING_ACK mode=%d\n", msg[1]);
audpp_cmd_cfg_adec_params(audio);
break;
case AUDPP_MSG_FLUSH_ACK:
MM_DBG("FLUSH_ACK\n");
audio->wflush = 0;
audio->rflush = 0;
wake_up(&audio->write_wait);
if (audio->pcm_feedback)
audqcelp_buffer_refresh(audio);
break;
case AUDPP_MSG_PCMDMAMISSED:
MM_DBG("PCMDMAMISSED\n");
audio->teos = 1;
wake_up(&audio->write_wait);
break;
case AUDPP_MSG_AVSYNC_MSG:
MM_DBG("AUDPP_MSG_AVSYNC_MSG\n");
memcpy(&audio->avsync[0], msg, sizeof(audio->avsync));
audio->avsync_flag = 1;
wake_up(&audio->avsync_wait);
break;
default:
MM_ERR("UNKNOWN (%d)\n", id);
}
}
struct msm_adsp_ops audplay_adsp_ops_qcelp = {
.event = audplay_dsp_event,
};
#define audplay_send_queue0(audio, cmd, len) \
msm_adsp_write(audio->audplay, audio->queue_id, \
cmd, len)
static int auddec_dsp_config(struct audio *audio, int enable)
{
struct audpp_cmd_cfg_dec_type cfg_dec_cmd;
memset(&cfg_dec_cmd, 0, sizeof(cfg_dec_cmd));
cfg_dec_cmd.cmd_id = AUDPP_CMD_CFG_DEC_TYPE;
if (enable)
cfg_dec_cmd.dec_cfg = AUDPP_CMD_UPDATDE_CFG_DEC |
AUDPP_CMD_ENA_DEC_V | AUDDEC_DEC_QCELP;
else
cfg_dec_cmd.dec_cfg = AUDPP_CMD_UPDATDE_CFG_DEC |
AUDPP_CMD_DIS_DEC_V;
cfg_dec_cmd.dm_mode = 0x0;
cfg_dec_cmd.stream_id = audio->dec_id;
return audpp_send_queue1(&cfg_dec_cmd, sizeof(cfg_dec_cmd));
}
static void audpp_cmd_cfg_adec_params(struct audio *audio)
{
struct audpp_cmd_cfg_adec_params_v13k cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.common.cmd_id = AUDPP_CMD_CFG_ADEC_PARAMS;
cmd.common.length = AUDPP_CMD_CFG_ADEC_PARAMS_V13K_LEN;
cmd.common.dec_id = audio->dec_id;
cmd.common.input_sampling_frequency = 8000;
cmd.stereo_cfg = AUDPP_CMD_PCM_INTF_MONO_V;
audpp_send_queue2(&cmd, sizeof(cmd));
}
static void audpp_cmd_cfg_routing_mode(struct audio *audio)
{
struct audpp_cmd_routing_mode cmd;
MM_DBG("\n"); /* Macro prints the file name and function */
memset(&cmd, 0, sizeof(cmd));
cmd.cmd_id = AUDPP_CMD_ROUTING_MODE;
cmd.object_number = audio->dec_id;
if (audio->pcm_feedback)
cmd.routing_mode = ROUTING_MODE_FTRT;
else
cmd.routing_mode = ROUTING_MODE_RT;
audpp_send_queue1(&cmd, sizeof(cmd));
}
static int audplay_dsp_send_data_avail(struct audio *audio,
unsigned idx, unsigned len)
{
struct audplay_cmd_bitstream_data_avail_nt2 cmd;
cmd.cmd_id = AUDPLAY_CMD_BITSTREAM_DATA_AVAIL_NT2;
if (audio->mfield)
cmd.decoder_id = AUDQCELP_METAFIELD_MASK |
(audio->out[idx].mfield_sz >> 1);
else
cmd.decoder_id = audio->dec_id;
cmd.buf_ptr = audio->out[idx].addr;
cmd.buf_size = len / 2;
cmd.partition_number = 0;
return audplay_send_queue0(audio, &cmd, sizeof(cmd));
}
static void audqcelp_buffer_refresh(struct audio *audio)
{
struct audplay_cmd_buffer_refresh refresh_cmd;
refresh_cmd.cmd_id = AUDPLAY_CMD_BUFFER_REFRESH;
refresh_cmd.num_buffers = 1;
refresh_cmd.buf0_address = audio->in[audio->fill_next].addr;
refresh_cmd.buf0_length = audio->in[audio->fill_next].size;
refresh_cmd.buf_read_count = 0;
MM_DBG("buf0_addr=%x buf0_len=%d\n", refresh_cmd.buf0_address,
refresh_cmd.buf0_length);
(void)audplay_send_queue0(audio, &refresh_cmd, sizeof(refresh_cmd));
}
static void audqcelp_config_hostpcm(struct audio *audio)
{
struct audplay_cmd_hpcm_buf_cfg cfg_cmd;
MM_DBG("\n"); /* Macro prints the file name and function */
cfg_cmd.cmd_id = AUDPLAY_CMD_HPCM_BUF_CFG;
cfg_cmd.max_buffers = 1;
cfg_cmd.byte_swap = 0;
cfg_cmd.hostpcm_config = (0x8000) | (0x4000);
cfg_cmd.feedback_frequency = 1;
cfg_cmd.partition_number = 0;
(void)audplay_send_queue0(audio, &cfg_cmd, sizeof(cfg_cmd));
}
static void audqcelp_send_data(struct audio *audio, unsigned needed)
{
struct buffer *frame;
unsigned long flags;
spin_lock_irqsave(&audio->dsp_lock, flags);
if (!audio->running)
goto done;
if (needed && !audio->wflush) {
/* We were called from the callback because the DSP
* requested more data. Note that the DSP does want
* more data, and if a buffer was in-flight, mark it
* as available (since the DSP must now be done with
* it).
*/
audio->out_needed = 1;
frame = audio->out + audio->out_tail;
if (frame->used == 0xffffffff) {
MM_DBG("frame %d free\n", audio->out_tail);
frame->used = 0;
audio->out_tail ^= 1;
wake_up(&audio->write_wait);
}
}
if (audio->out_needed) {
/* If the DSP currently wants data and we have a
* buffer available, we will send it and reset
* the needed flag. We'll mark the buffer as in-flight
* so that it won't be recycled until the next buffer
* is requested
*/
frame = audio->out + audio->out_tail;
if (frame->used) {
BUG_ON(frame->used == 0xffffffff);
MM_DBG("frame %d busy\n", audio->out_tail);
audplay_dsp_send_data_avail(audio, audio->out_tail,
frame->used);
frame->used = 0xffffffff;
audio->out_needed = 0;
}
}
done:
spin_unlock_irqrestore(&audio->dsp_lock, flags);
}
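/*
 * Lifecycle sketch for the two out[] buffers used above (illustrative):
 *
 *   used == 0          free, the writer may fill it
 *   used == N          holds N bytes, waiting to be sent to the DSP
 *   used == 0xffffffff in flight to the DSP
 *
 * out_tail toggles between 0 and 1 (out_tail ^= 1) when the DSP asks
 * for more data, which also marks the in-flight buffer as free again.
 */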
/* ------------------- device --------------------- */
static void audqcelp_flush(struct audio *audio)
{
audio->out[0].used = 0;
audio->out[1].used = 0;
audio->out_head = 0;
audio->out_tail = 0;
audio->out_needed = 0;
}
static void audqcelp_flush_pcm_buf(struct audio *audio)
{
uint8_t index;
for (index = 0; index < PCM_BUF_MAX_COUNT; index++)
audio->in[index].used = 0;
audio->buf_refresh = 0;
audio->read_next = 0;
audio->fill_next = 0;
}
static void audqcelp_ioport_reset(struct audio *audio)
{
/* Make sure the read/write threads are woken from
* sleep and know that the system cannot
* process io requests at the moment
*/
wake_up(&audio->write_wait);
mutex_lock(&audio->write_lock);
audqcelp_flush(audio);
mutex_unlock(&audio->write_lock);
wake_up(&audio->read_wait);
mutex_lock(&audio->read_lock);
audqcelp_flush_pcm_buf(audio);
mutex_unlock(&audio->read_lock);
audio->avsync_flag = 1;
wake_up(&audio->avsync_wait);
}
static int audqcelp_events_pending(struct audio *audio)
{
unsigned long flags;
int empty;
spin_lock_irqsave(&audio->event_queue_lock, flags);
empty = !list_empty(&audio->event_queue);
spin_unlock_irqrestore(&audio->event_queue_lock, flags);
return empty || audio->event_abort;
}
static void audqcelp_reset_event_queue(struct audio *audio)
{
unsigned long flags;
struct audqcelp_event *drv_evt;
struct list_head *ptr, *next;
spin_lock_irqsave(&audio->event_queue_lock, flags);
list_for_each_safe(ptr, next, &audio->event_queue) {
drv_evt = list_first_entry(&audio->event_queue,
struct audqcelp_event, list);
list_del(&drv_evt->list);
kfree(drv_evt);
}
list_for_each_safe(ptr, next, &audio->free_event_queue) {
drv_evt = list_first_entry(&audio->free_event_queue,
struct audqcelp_event, list);
list_del(&drv_evt->list);
kfree(drv_evt);
}
spin_unlock_irqrestore(&audio->event_queue_lock, flags);
return;
}
static long audqcelp_process_event_req(struct audio *audio, void __user *arg)
{
long rc;
struct msm_audio_event usr_evt;
struct audqcelp_event *drv_evt = NULL;
int timeout;
unsigned long flags;
if (copy_from_user(&usr_evt, arg, sizeof(struct msm_audio_event)))
return -EFAULT;
timeout = (int) usr_evt.timeout_ms;
if (timeout > 0) {
rc = wait_event_interruptible_timeout(
audio->event_wait, audqcelp_events_pending(audio),
msecs_to_jiffies(timeout));
if (rc == 0)
return -ETIMEDOUT;
} else {
rc = wait_event_interruptible(
audio->event_wait, audqcelp_events_pending(audio));
}
if (rc < 0)
return rc;
if (audio->event_abort) {
audio->event_abort = 0;
return -ENODEV;
}
rc = 0;
spin_lock_irqsave(&audio->event_queue_lock, flags);
if (!list_empty(&audio->event_queue)) {
drv_evt = list_first_entry(&audio->event_queue,
struct audqcelp_event, list);
list_del(&drv_evt->list);
}
if (drv_evt) {
usr_evt.event_type = drv_evt->event_type;
usr_evt.event_payload = drv_evt->payload;
list_add_tail(&drv_evt->list, &audio->free_event_queue);
} else
rc = -1;
spin_unlock_irqrestore(&audio->event_queue_lock, flags);
if (!rc && copy_to_user(arg, &usr_evt, sizeof(usr_evt)))
rc = -EFAULT;
return rc;
}
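/*
 * Userspace sketch for draining driver events (illustrative; the
 * event structures come from <linux/msm_audio.h>):
 *
 *   struct msm_audio_event evt = { .timeout_ms = 0 };
 *   if (ioctl(fd, AUDIO_GET_EVENT, &evt) == 0)
 *           handle_event(evt.event_type, &evt.event_payload);
 *
 * A positive timeout_ms bounds the wait; zero blocks until an event
 * arrives or AUDIO_ABORT_GET_EVENT is issued.
 */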
static int audio_enable_eq(struct audio *audio, int enable)
{
if (audio->eq_enable == enable && !audio->eq_needs_commit)
return 0;
audio->eq_enable = enable;
if (audio->running) {
audpp_dsp_set_eq(audio->dec_id, enable, &audio->eq, POPP);
audio->eq_needs_commit = 0;
}
return 0;
}
static int audio_get_avsync_data(struct audio *audio,
struct msm_audio_stats *stats)
{
int rc = -EINVAL;
unsigned long flags;
local_irq_save(flags);
if (audio->dec_id == audio->avsync[0] && audio->avsync_flag) {
/* av_sync sample count */
stats->sample_count = (audio->avsync[2] << 16) |
(audio->avsync[3]);
/* av_sync byte_count */
stats->byte_count = (audio->avsync[5] << 16) |
(audio->avsync[6]);
audio->avsync_flag = 0;
rc = 0;
}
local_irq_restore(flags);
return rc;
}
static long audqcelp_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct audio *audio = file->private_data;
int rc = -EINVAL;
unsigned long flags = 0;
uint16_t enable_mask;
int enable;
int prev_state;
MM_DBG("cmd = %d\n", cmd);
if (cmd == AUDIO_GET_STATS) {
struct msm_audio_stats stats;
audio->avsync_flag = 0;
memset(&stats, 0, sizeof(stats));
if (audpp_query_avsync(audio->dec_id) < 0)
return rc;
rc = wait_event_interruptible_timeout(audio->avsync_wait,
(audio->avsync_flag == 1),
msecs_to_jiffies(AUDPP_AVSYNC_EVENT_TIMEOUT));
if (rc < 0)
return rc;
else if ((rc > 0) || ((rc == 0) && (audio->avsync_flag == 1))) {
if (audio_get_avsync_data(audio, &stats) < 0)
return rc;
if (copy_to_user((void *)arg, &stats, sizeof(stats)))
return -EFAULT;
return 0;
} else
return -EAGAIN;
}
switch (cmd) {
case AUDIO_ENABLE_AUDPP:
if (copy_from_user(&enable_mask, (void *) arg,
sizeof(enable_mask))) {
rc = -EFAULT;
break;
}
spin_lock_irqsave(&audio->dsp_lock, flags);
enable = (enable_mask & EQ_ENABLE) ? 1 : 0;
audio_enable_eq(audio, enable);
spin_unlock_irqrestore(&audio->dsp_lock, flags);
rc = 0;
break;
case AUDIO_SET_VOLUME:
spin_lock_irqsave(&audio->dsp_lock, flags);
audio->vol_pan.volume = arg;
if (audio->running)
audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan,
POPP);
spin_unlock_irqrestore(&audio->dsp_lock, flags);
rc = 0;
break;
case AUDIO_SET_PAN:
spin_lock_irqsave(&audio->dsp_lock, flags);
audio->vol_pan.pan = arg;
if (audio->running)
audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan,
POPP);
spin_unlock_irqrestore(&audio->dsp_lock, flags);
rc = 0;
break;
case AUDIO_SET_EQ:
prev_state = audio->eq_enable;
audio->eq_enable = 0;
if (copy_from_user(&audio->eq.num_bands, (void *) arg,
sizeof(audio->eq) -
(AUDPP_CMD_CFG_OBJECT_PARAMS_COMMON_LEN + 2))) {
rc = -EFAULT;
break;
}
audio->eq_enable = prev_state;
audio->eq_needs_commit = 1;
rc = 0;
break;
}
if (-EINVAL != rc)
return rc;
if (cmd == AUDIO_GET_EVENT) {
MM_DBG("AUDIO_GET_EVENT\n");
if (mutex_trylock(&audio->get_event_lock)) {
rc = audqcelp_process_event_req(audio,
(void __user *) arg);
mutex_unlock(&audio->get_event_lock);
} else
rc = -EBUSY;
return rc;
}
if (cmd == AUDIO_ABORT_GET_EVENT) {
audio->event_abort = 1;
wake_up(&audio->event_wait);
return 0;
}
mutex_lock(&audio->lock);
switch (cmd) {
case AUDIO_START:
MM_DBG("AUDIO_START\n");
rc = audqcelp_enable(audio);
if (!rc) {
rc = wait_event_interruptible_timeout(audio->wait,
audio->dec_state != MSM_AUD_DECODER_STATE_NONE,
msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS));
MM_INFO("dec_state %d rc = %d\n", audio->dec_state, rc);
if (audio->dec_state != MSM_AUD_DECODER_STATE_SUCCESS)
rc = -ENODEV;
else
rc = 0;
}
break;
case AUDIO_STOP:
MM_DBG("AUDIO_STOP\n");
rc = audqcelp_disable(audio);
audio->stopped = 1;
audqcelp_ioport_reset(audio);
audio->stopped = 0;
break;
case AUDIO_FLUSH:
MM_DBG("AUDIO_FLUSH\n");
audio->rflush = 1;
audio->wflush = 1;
audqcelp_ioport_reset(audio);
if (audio->running) {
audpp_flush(audio->dec_id);
rc = wait_event_interruptible(audio->write_wait,
!audio->wflush);
if (rc < 0) {
MM_ERR("AUDIO_FLUSH interrupted\n");
rc = -EINTR;
}
} else {
audio->rflush = 0;
audio->wflush = 0;
}
break;
case AUDIO_SET_CONFIG:{
struct msm_audio_config config;
if (copy_from_user(&config, (void *)arg,
sizeof(config))) {
rc = -EFAULT;
break;
}
audio->mfield = config.meta_field;
MM_DBG("AUDIO_SET_CONFIG applicable \
for metafield configuration\n");
rc = 0;
break;
}
case AUDIO_GET_CONFIG:{
struct msm_audio_config config;
config.buffer_size = BUFSZ;
config.buffer_count = BUF_COUNT;
config.sample_rate = 8000;
config.channel_count = 1;
config.meta_field = 0;
config.unused[0] = 0;
config.unused[1] = 0;
config.unused[2] = 0;
if (copy_to_user((void *)arg, &config,
sizeof(config)))
rc = -EFAULT;
else
rc = 0;
break;
}
case AUDIO_GET_PCM_CONFIG:{
struct msm_audio_pcm_config config;
config.pcm_feedback = audio->pcm_feedback;
config.buffer_count = PCM_BUF_MAX_COUNT;
config.buffer_size = PCM_BUFSZ_MIN;
if (copy_to_user((void *)arg, &config,
sizeof(config)))
rc = -EFAULT;
else
rc = 0;
break;
}
case AUDIO_SET_PCM_CONFIG:{
struct msm_audio_pcm_config config;
if (copy_from_user(&config, (void *)arg,
sizeof(config))) {
rc = -EFAULT;
break;
}
if (config.pcm_feedback != audio->pcm_feedback) {
MM_ERR("Not sufficient permission to "
"change the playback mode\n");
rc = -EACCES;
break;
}
if ((config.buffer_count > PCM_BUF_MAX_COUNT) ||
(config.buffer_count == 1))
config.buffer_count = PCM_BUF_MAX_COUNT;
if (config.buffer_size < PCM_BUFSZ_MIN)
config.buffer_size = PCM_BUFSZ_MIN;
/* Check if pcm feedback is required */
if ((config.pcm_feedback) && (!audio->read_data)) {
MM_DBG("allocate PCM buf %d\n",
config.buffer_count * config.buffer_size);
audio->read_phys =
allocate_contiguous_ebi_nomap(
config.buffer_size *
config.buffer_count,
SZ_4K);
if (!audio->read_phys) {
rc = -ENOMEM;
break;
}
audio->map_v_read = ioremap(
audio->read_phys,
config.buffer_size *
config.buffer_count);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("failed to map read buf\n");
rc = -ENOMEM;
free_contiguous_memory_by_paddr(
audio->read_phys);
} else {
uint8_t index;
uint32_t offset = 0;
audio->read_data =
audio->map_v_read;
audio->buf_refresh = 0;
audio->pcm_buf_count =
config.buffer_count;
audio->read_next = 0;
audio->fill_next = 0;
for (index = 0;
index < config.buffer_count; index++) {
audio->in[index].data =
audio->read_data + offset;
audio->in[index].addr =
audio->read_phys + offset;
audio->in[index].size =
config.buffer_size;
audio->in[index].used = 0;
offset += config.buffer_size;
}
MM_DBG("read buf: phy addr 0x%08x \
kernel addr 0x%08x\n",
audio->read_phys,
(int)audio->read_data);
rc = 0;
}
} else {
rc = 0;
}
break;
}
case AUDIO_PAUSE:
MM_DBG("AUDIO_PAUSE %ld\n", arg);
rc = audpp_pause(audio->dec_id, (int) arg);
break;
case AUDIO_GET_SESSION_ID:
if (copy_to_user((void *) arg, &audio->dec_id,
sizeof(unsigned short)))
rc = -EFAULT;
else
rc = 0;
break;
default:
rc = -EINVAL;
}
mutex_unlock(&audio->lock);
return rc;
}
/* Only useful in tunnel-mode */
static int audqcelp_fsync(struct file *file, loff_t ppos1, loff_t ppos2, int datasync)
{
struct audio *audio = file->private_data;
int rc = 0;
MM_DBG("\n"); /* Macro prints the file name and function */
if (!audio->running || audio->pcm_feedback) {
rc = -EINVAL;
goto done_nolock;
}
mutex_lock(&audio->write_lock);
rc = wait_event_interruptible(audio->write_wait,
(!audio->out[0].used &&
!audio->out[1].used &&
audio->out_needed) || audio->wflush);
if (rc < 0)
goto done;
else if (audio->wflush) {
rc = -EBUSY;
goto done;
}
/* The pcm dmamiss message is sent continuously
* when the decoder is starved, so there is no race
* condition concern
*/
audio->teos = 0;
rc = wait_event_interruptible(audio->write_wait,
audio->teos || audio->wflush);
if (audio->wflush)
rc = -EBUSY;
done:
mutex_unlock(&audio->write_lock);
done_nolock:
return rc;
}
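/*
 * Tunnel-mode drain sketch (illustrative): a writer that wants all
 * queued audio consumed by the DSP can do
 *
 *   write(fd, frame, len);
 *   fsync(fd);               which maps to audqcelp_fsync()
 *
 * which first waits for both out[] buffers to empty and then for the
 * PCMDMAMISSED message that sets teos once the decoder is starved.
 */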
static ssize_t audqcelp_read(struct file *file, char __user *buf, size_t count,
loff_t *pos)
{
struct audio *audio = file->private_data;
const char __user *start = buf;
int rc = 0;
if (!audio->pcm_feedback)
return 0; /* PCM feedback is not enabled. Nothing to read */
mutex_lock(&audio->read_lock);
MM_DBG("%d\n", count);
while (count > 0) {
rc = wait_event_interruptible(audio->read_wait,
(audio->in[audio->read_next].used > 0) ||
(audio->stopped) || (audio->rflush));
if (rc < 0)
break;
if (audio->stopped || audio->rflush) {
rc = -EBUSY;
break;
}
if (count < audio->in[audio->read_next].used) {
/* Reads must happen on frame boundaries. Since the driver
does not know the frame size, the read count must be greater
than or equal to the size of the PCM samples */
MM_DBG("read stop - partial frame\n");
break;
} else {
MM_DBG("read from in[%d]\n", audio->read_next);
if (copy_to_user(buf,
audio->in[audio->read_next].data,
audio->in[audio->read_next].used)) {
MM_ERR("invalid addr %x\n", (unsigned int)buf);
rc = -EFAULT;
break;
}
count -= audio->in[audio->read_next].used;
buf += audio->in[audio->read_next].used;
audio->in[audio->read_next].used = 0;
if ((++audio->read_next) == audio->pcm_buf_count)
audio->read_next = 0;
break;
/* Force exit from the while loop
* to prevent the output thread from
* sleeping too long if data is
* not ready at this moment.
*/
}
}
/* Don't feed the output buffer to the HW decoder during flushing;
* a buffer refresh command will be sent once the flush completes.
* Sending a buffer refresh command here can confuse the HW decoder.
*/
if (audio->buf_refresh && !audio->rflush) {
audio->buf_refresh = 0;
MM_DBG("kick start pcm feedback again\n");
audqcelp_buffer_refresh(audio);
}
mutex_unlock(&audio->read_lock);
if (buf > start)
rc = buf - start;
MM_DBG("read %d bytes\n", rc);
return rc;
}
static int audqcelp_process_eos(struct audio *audio,
const char __user *buf_start, unsigned short mfield_size)
{
struct buffer *frame;
int rc = 0;
frame = audio->out + audio->out_head;
rc = wait_event_interruptible(audio->write_wait,
(audio->out_needed &&
audio->out[0].used == 0 &&
audio->out[1].used == 0)
|| (audio->stopped)
|| (audio->wflush));
if (rc < 0)
goto done;
if (audio->stopped || audio->wflush) {
rc = -EBUSY;
goto done;
}
if (copy_from_user(frame->data, buf_start, mfield_size)) {
rc = -EFAULT;
goto done;
}
frame->mfield_sz = mfield_size;
audio->out_head ^= 1;
frame->used = mfield_size;
audqcelp_send_data(audio, 0);
done:
return rc;
}
static ssize_t audqcelp_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
struct audio *audio = file->private_data;
const char __user *start = buf;
struct buffer *frame;
size_t xfer;
char *cpy_ptr;
int rc = 0, eos_condition = AUDQCELP_EOS_NONE;
unsigned short mfield_size = 0;
MM_DBG("cnt=%d\n", count);
if (count & 1)
return -EINVAL;
mutex_lock(&audio->write_lock);
while (count > 0) {
frame = audio->out + audio->out_head;
cpy_ptr = frame->data;
rc = wait_event_interruptible(audio->write_wait,
(frame->used == 0)
|| (audio->stopped)
|| (audio->wflush));
MM_DBG("buffer available\n");
if (rc < 0)
break;
if (audio->stopped || audio->wflush) {
rc = -EBUSY;
break;
}
if (audio->mfield) {
if (buf == start) {
/* Processing beginning of user buffer */
if (__get_user(mfield_size,
(unsigned short __user *) buf)) {
rc = -EFAULT;
break;
} else if (mfield_size > count) {
rc = -EINVAL;
break;
}
MM_DBG("mf offset_val %x\n", mfield_size);
if (copy_from_user(cpy_ptr, buf, mfield_size)) {
rc = -EFAULT;
break;
}
/* Check if the EOS flag is set and the buffer
* contains just the meta field
*/
if (cpy_ptr[AUDQCELP_EOS_FLG_OFFSET] &
AUDQCELP_EOS_FLG_MASK) {
MM_DBG("EOS SET\n");
eos_condition = AUDQCELP_EOS_SET;
if (mfield_size == count) {
buf += mfield_size;
break;
} else
cpy_ptr[AUDQCELP_EOS_FLG_OFFSET] &=
~AUDQCELP_EOS_FLG_MASK;
}
cpy_ptr += mfield_size;
count -= mfield_size;
buf += mfield_size;
} else {
mfield_size = 0;
MM_DBG("continuous buffer\n");
}
frame->mfield_sz = mfield_size;
}
xfer = (count > (frame->size - mfield_size)) ?
(frame->size - mfield_size) : count;
if (copy_from_user(cpy_ptr, buf, xfer)) {
rc = -EFAULT;
break;
}
frame->used = xfer + mfield_size;
audio->out_head ^= 1;
count -= xfer;
buf += xfer;
audqcelp_send_data(audio, 0);
}
if (eos_condition == AUDQCELP_EOS_SET)
rc = audqcelp_process_eos(audio, start, mfield_size);
mutex_unlock(&audio->write_lock);
if (!rc) {
if (buf > start)
return buf - start;
}
return rc;
}
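/*
 * Expected layout of a userspace write when meta fields are enabled
 * (a sketch based on the parsing above):
 *
 *   [u16 mfield_size][meta bytes, EOS flag in bit 0 of byte 0x0A][payload]
 *
 * A write whose length equals mfield_size with the EOS bit set carries
 * no payload and makes audqcelp_process_eos() signal end of stream.
 */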
static int audqcelp_release(struct inode *inode, struct file *file)
{
struct audio *audio = file->private_data;
MM_INFO("audio instance 0x%08x freeing\n", (int) audio);
mutex_lock(&audio->lock);
auddev_unregister_evt_listner(AUDDEV_CLNT_DEC, audio->dec_id);
audqcelp_disable(audio);
audqcelp_flush(audio);
audqcelp_flush_pcm_buf(audio);
msm_adsp_put(audio->audplay);
audpp_adec_free(audio->dec_id);
#ifdef CONFIG_HAS_EARLYSUSPEND
unregister_early_suspend(&audio->suspend_ctl.node);
#endif
audio->opened = 0;
audio->event_abort = 1;
wake_up(&audio->event_wait);
audqcelp_reset_event_queue(audio);
iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
if (audio->read_data) {
iounmap(audio->map_v_read);
free_contiguous_memory_by_paddr(audio->read_phys);
}
mutex_unlock(&audio->lock);
#ifdef CONFIG_DEBUG_FS
if (audio->dentry)
debugfs_remove(audio->dentry);
#endif
kfree(audio);
return 0;
}
#ifdef CONFIG_HAS_EARLYSUSPEND
static void audqcelp_post_event(struct audio *audio, int type,
union msm_audio_event_payload payload)
{
struct audqcelp_event *e_node = NULL;
unsigned long flags;
spin_lock_irqsave(&audio->event_queue_lock, flags);
if (!list_empty(&audio->free_event_queue)) {
e_node = list_first_entry(&audio->free_event_queue,
struct audqcelp_event, list);
list_del(&e_node->list);
} else {
e_node = kmalloc(sizeof(struct audqcelp_event), GFP_ATOMIC);
if (!e_node) {
MM_ERR("No mem to post event %d\n", type);
return;
}
}
e_node->event_type = type;
e_node->payload = payload;
list_add_tail(&e_node->list, &audio->event_queue);
spin_unlock_irqrestore(&audio->event_queue_lock, flags);
wake_up(&audio->event_wait);
}
static void audqcelp_suspend(struct early_suspend *h)
{
struct audqcelp_suspend_ctl *ctl =
container_of(h, struct audqcelp_suspend_ctl, node);
union msm_audio_event_payload payload;
MM_DBG("\n"); /* Macro prints the file name and function */
audqcelp_post_event(ctl->audio, AUDIO_EVENT_SUSPEND, payload);
}
static void audqcelp_resume(struct early_suspend *h)
{
struct audqcelp_suspend_ctl *ctl =
container_of(h, struct audqcelp_suspend_ctl, node);
union msm_audio_event_payload payload;
MM_DBG("\n"); /* Macro prints the file name and function */
audqcelp_post_event(ctl->audio, AUDIO_EVENT_RESUME, payload);
}
#endif
#ifdef CONFIG_DEBUG_FS
static ssize_t audqcelp_debug_open(struct inode *inode, struct file *file)
{
file->private_data = inode->i_private;
return 0;
}
static ssize_t audqcelp_debug_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
const int debug_bufmax = 1024;
static char buffer[1024];
int n = 0, i;
struct audio *audio = file->private_data;
mutex_lock(&audio->lock);
n = scnprintf(buffer, debug_bufmax, "opened %d\n", audio->opened);
n += scnprintf(buffer + n, debug_bufmax - n,
"enabled %d\n", audio->enabled);
n += scnprintf(buffer + n, debug_bufmax - n,
"stopped %d\n", audio->stopped);
n += scnprintf(buffer + n, debug_bufmax - n,
"pcm_feedback %d\n", audio->pcm_feedback);
n += scnprintf(buffer + n, debug_bufmax - n,
"out_buf_sz %d\n", audio->out[0].size);
n += scnprintf(buffer + n, debug_bufmax - n,
"pcm_buf_count %d \n", audio->pcm_buf_count);
n += scnprintf(buffer + n, debug_bufmax - n,
"pcm_buf_sz %d \n", audio->in[0].size);
n += scnprintf(buffer + n, debug_bufmax - n,
"volume %x \n", audio->vol_pan.volume);
mutex_unlock(&audio->lock);
/* Following variables are only useful for debugging when
* when playback halts unexpectedly. Thus, no mutual exclusion
* enforced
*/
n += scnprintf(buffer + n, debug_bufmax - n,
"wflush %d\n", audio->wflush);
n += scnprintf(buffer + n, debug_bufmax - n,
"rflush %d\n", audio->rflush);
n += scnprintf(buffer + n, debug_bufmax - n,
"running %d \n", audio->running);
n += scnprintf(buffer + n, debug_bufmax - n,
"dec state %d \n", audio->dec_state);
n += scnprintf(buffer + n, debug_bufmax - n,
"out_needed %d \n", audio->out_needed);
n += scnprintf(buffer + n, debug_bufmax - n,
"out_head %d \n", audio->out_head);
n += scnprintf(buffer + n, debug_bufmax - n,
"out_tail %d \n", audio->out_tail);
n += scnprintf(buffer + n, debug_bufmax - n,
"out[0].used %d \n", audio->out[0].used);
n += scnprintf(buffer + n, debug_bufmax - n,
"out[1].used %d \n", audio->out[1].used);
n += scnprintf(buffer + n, debug_bufmax - n,
"buffer_refresh %d \n", audio->buf_refresh);
n += scnprintf(buffer + n, debug_bufmax - n,
"read_next %d \n", audio->read_next);
n += scnprintf(buffer + n, debug_bufmax - n,
"fill_next %d \n", audio->fill_next);
for (i = 0; i < audio->pcm_buf_count; i++)
n += scnprintf(buffer + n, debug_bufmax - n,
"in[%d].size %d \n", i, audio->in[i].used);
buffer[n] = 0;
return simple_read_from_buffer(buf, count, ppos, buffer, n);
}
static const struct file_operations audqcelp_debug_fops = {
.read = audqcelp_debug_read,
.open = audqcelp_debug_open,
};
#endif
static int audqcelp_open(struct inode *inode, struct file *file)
{
struct audio *audio = NULL;
int rc, dec_attrb, decid, i;
struct audqcelp_event *e_node = NULL;
#ifdef CONFIG_DEBUG_FS
/* 4 bytes represent the decoder number, 1 byte for the terminating NUL */
char name[sizeof "msm_qcelp_" + 5];
#endif
/* Create audio instance, set to zero */
audio = kzalloc(sizeof(struct audio), GFP_KERNEL);
if (!audio) {
MM_ERR("no memory to allocate audio instance\n");
rc = -ENOMEM;
goto done;
}
MM_INFO("audio instance 0x%08x created\n", (int)audio);
/* Allocate the decoder */
dec_attrb = AUDDEC_DEC_QCELP;
if ((file->f_mode & FMODE_WRITE) &&
(file->f_mode & FMODE_READ)) {
dec_attrb |= MSM_AUD_MODE_NONTUNNEL;
audio->pcm_feedback = NON_TUNNEL_MODE_PLAYBACK;
} else if ((file->f_mode & FMODE_WRITE) &&
!(file->f_mode & FMODE_READ)) {
dec_attrb |= MSM_AUD_MODE_TUNNEL;
audio->pcm_feedback = TUNNEL_MODE_PLAYBACK;
} else {
kfree(audio);
rc = -EACCES;
goto done;
}
decid = audpp_adec_alloc(dec_attrb, &audio->module_name,
&audio->queue_id);
if (decid < 0) {
MM_ERR("No free decoder available, freeing instance 0x%08x\n",
(int)audio);
rc = -ENODEV;
kfree(audio);
goto done;
}
audio->dec_id = decid & MSM_AUD_DECODER_MASK;
audio->phys = allocate_contiguous_ebi_nomap(DMASZ, SZ_4K);
if (!audio->phys) {
MM_ERR("could not allocate write buffers, freeing instance \
0x%08x\n", (int)audio);
rc = -ENOMEM;
audpp_adec_free(audio->dec_id);
kfree(audio);
goto done;
} else {
audio->map_v_write = ioremap(audio->phys, DMASZ);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write phys address, freeing \
instance 0x%08x\n", (int)audio);
rc = -ENOMEM;
free_contiguous_memory_by_paddr(audio->phys);
audpp_adec_free(audio->dec_id);
kfree(audio);
goto done;
}
audio->data = audio->map_v_write;
MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
audio->phys, (int)audio->data);
}
rc = msm_adsp_get(audio->module_name, &audio->audplay,
&audplay_adsp_ops_qcelp, audio);
if (rc) {
MM_ERR("failed to get %s module, freeing instance 0x%08x\n",
audio->module_name, (int)audio);
goto err;
}
/* Initialize all locks of audio instance */
mutex_init(&audio->lock);
mutex_init(&audio->write_lock);
mutex_init(&audio->read_lock);
mutex_init(&audio->get_event_lock);
spin_lock_init(&audio->dsp_lock);
init_waitqueue_head(&audio->write_wait);
init_waitqueue_head(&audio->read_wait);
INIT_LIST_HEAD(&audio->free_event_queue);
INIT_LIST_HEAD(&audio->event_queue);
init_waitqueue_head(&audio->wait);
init_waitqueue_head(&audio->event_wait);
spin_lock_init(&audio->event_queue_lock);
init_waitqueue_head(&audio->avsync_wait);
/* Initialize buffer */
audio->out[0].data = audio->data + 0;
audio->out[0].addr = audio->phys + 0;
audio->out[0].size = BUFSZ;
audio->out[1].data = audio->data + BUFSZ;
audio->out[1].addr = audio->phys + BUFSZ;
audio->out[1].size = BUFSZ;
audio->vol_pan.volume = 0x2000;
audqcelp_flush(audio);
file->private_data = audio;
audio->opened = 1;
audio->device_events = AUDDEV_EVT_DEV_RDY
|AUDDEV_EVT_DEV_RLS|
AUDDEV_EVT_STREAM_VOL_CHG;
rc = auddev_register_evt_listner(audio->device_events,
AUDDEV_CLNT_DEC,
audio->dec_id,
qcelp_listner,
(void *)audio);
if (rc) {
MM_ERR("%s: failed to register listener\n", __func__);
goto event_err;
}
#ifdef CONFIG_DEBUG_FS
snprintf(name, sizeof name, "msm_qcelp_%04x", audio->dec_id);
audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
NULL, (void *) audio, &audqcelp_debug_fops);
if (IS_ERR(audio->dentry))
MM_DBG("debugfs_create_file failed\n");
#endif
#ifdef CONFIG_HAS_EARLYSUSPEND
audio->suspend_ctl.node.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
audio->suspend_ctl.node.resume = audqcelp_resume;
audio->suspend_ctl.node.suspend = audqcelp_suspend;
audio->suspend_ctl.audio = audio;
register_early_suspend(&audio->suspend_ctl.node);
#endif
for (i = 0; i < AUDQCELP_EVENT_NUM; i++) {
e_node = kmalloc(sizeof(struct audqcelp_event), GFP_KERNEL);
if (e_node)
list_add_tail(&e_node->list, &audio->free_event_queue);
else {
MM_ERR("event pkt alloc failed\n");
break;
}
}
done:
return rc;
event_err:
msm_adsp_put(audio->audplay);
err:
iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
audpp_adec_free(audio->dec_id);
kfree(audio);
return rc;
}
static const struct file_operations audio_qcelp_fops = {
.owner = THIS_MODULE,
.open = audqcelp_open,
.release = audqcelp_release,
.read = audqcelp_read,
.write = audqcelp_write,
.unlocked_ioctl = audqcelp_ioctl,
.fsync = audqcelp_fsync,
};
struct miscdevice audio_qcelp_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "msm_qcelp",
.fops = &audio_qcelp_fops,
};
static int __init audqcelp_init(void)
{
return misc_register(&audio_qcelp_misc);
}
static void __exit audqcelp_exit(void)
{
misc_deregister(&audio_qcelp_misc);
}
module_init(audqcelp_init);
module_exit(audqcelp_exit);
MODULE_DESCRIPTION("MSM QCELP 13K driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
vitaliyy/msm7x30 | drivers/staging/rt2860/common/cmm_mac_usb.c | 899 | 34844 | /*
*************************************************************************
* Ralink Tech Inc.
* 5F., No.36, Taiyuan St., Jhubei City,
* Hsinchu County 302,
* Taiwan, R.O.C.
*
* (c) Copyright 2002-2007, Ralink Technology, Inc.
*
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
*************************************************************************
*/
#ifdef RTMP_MAC_USB
#include "../rt_config.h"
/*
========================================================================
Routine Description:
Initialize receive data structures.
Arguments:
pAd Pointer to our adapter
Return Value:
NDIS_STATUS_SUCCESS
NDIS_STATUS_RESOURCES
Note:
Initialize all receive related private buffers, including those defined
in the struct rt_rtmp_adapter structure and all private data structures. The major
work is to allocate a buffer for each packet and chain each buffer to an
NDIS packet descriptor.
========================================================================
*/
int NICInitRecv(struct rt_rtmp_adapter *pAd)
{
u8 i;
int Status = NDIS_STATUS_SUCCESS;
struct os_cookie *pObj = (struct os_cookie *)pAd->OS_Cookie;
DBGPRINT(RT_DEBUG_TRACE, ("--> NICInitRecv\n"));
pObj = pObj;
/*InterlockedExchange(&pAd->PendingRx, 0); */
pAd->PendingRx = 0;
pAd->NextRxBulkInReadIndex = 0; /* Next Rx Read index */
pAd->NextRxBulkInIndex = 0; /*RX_RING_SIZE -1; // Rx Bulk pointer */
pAd->NextRxBulkInPosition = 0;
for (i = 0; i < (RX_RING_SIZE); i++) {
struct rt_rx_context *pRxContext = &(pAd->RxContext[i]);
/*Allocate URB */
pRxContext->pUrb = RTUSB_ALLOC_URB(0);
if (pRxContext->pUrb == NULL) {
Status = NDIS_STATUS_RESOURCES;
goto out1;
}
/* Allocate transfer buffer */
pRxContext->TransferBuffer =
RTUSB_URB_ALLOC_BUFFER(pObj->pUsb_Dev, MAX_RXBULK_SIZE,
&pRxContext->data_dma);
if (pRxContext->TransferBuffer == NULL) {
Status = NDIS_STATUS_RESOURCES;
goto out1;
}
NdisZeroMemory(pRxContext->TransferBuffer, MAX_RXBULK_SIZE);
pRxContext->pAd = pAd;
pRxContext->pIrp = NULL;
pRxContext->InUse = FALSE;
pRxContext->IRPPending = FALSE;
pRxContext->Readable = FALSE;
/*pRxContext->ReorderInUse = FALSE; */
pRxContext->bRxHandling = FALSE;
pRxContext->BulkInOffset = 0;
}
DBGPRINT(RT_DEBUG_TRACE, ("<-- NICInitRecv(Status=%d)\n", Status));
return Status;
out1:
for (i = 0; i < (RX_RING_SIZE); i++) {
struct rt_rx_context *pRxContext = &(pAd->RxContext[i]);
if (NULL != pRxContext->TransferBuffer) {
RTUSB_URB_FREE_BUFFER(pObj->pUsb_Dev, MAX_RXBULK_SIZE,
pRxContext->TransferBuffer,
pRxContext->data_dma);
pRxContext->TransferBuffer = NULL;
}
if (NULL != pRxContext->pUrb) {
RTUSB_UNLINK_URB(pRxContext->pUrb);
RTUSB_FREE_URB(pRxContext->pUrb);
pRxContext->pUrb = NULL;
}
}
return Status;
}
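/*
 * Per-descriptor RX state sketch (illustrative, based on the flags
 * initialized above): each of the RX_RING_SIZE contexts starts with
 * InUse, IRPPending, Readable and bRxHandling all FALSE and
 * BulkInOffset 0; the bulk-in path later marks a context InUse while
 * its URB is pending and Readable once data is waiting to be consumed.
 */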
/*
========================================================================
Routine Description:
Initialize transmit data structures.
Arguments:
pAd Pointer to our adapter
Return Value:
NDIS_STATUS_SUCCESS
NDIS_STATUS_RESOURCES
Note:
========================================================================
*/
int NICInitTransmit(struct rt_rtmp_adapter *pAd)
{
#define LM_USB_ALLOC(pObj, Context, TB_Type, BufferSize, Status, msg1, err1, msg2, err2) \
Context->pUrb = RTUSB_ALLOC_URB(0); \
if (Context->pUrb == NULL) { \
DBGPRINT(RT_DEBUG_ERROR, msg1); \
Status = NDIS_STATUS_RESOURCES; \
goto err1; } \
\
Context->TransferBuffer = \
(TB_Type)RTUSB_URB_ALLOC_BUFFER(pObj->pUsb_Dev, BufferSize, &Context->data_dma); \
if (Context->TransferBuffer == NULL) { \
DBGPRINT(RT_DEBUG_ERROR, msg2); \
Status = NDIS_STATUS_RESOURCES; \
goto err2; }
#define LM_URB_FREE(pObj, Context, BufferSize) \
if (NULL != Context->pUrb) { \
RTUSB_UNLINK_URB(Context->pUrb); \
RTUSB_FREE_URB(Context->pUrb); \
Context->pUrb = NULL; } \
if (NULL != Context->TransferBuffer) { \
RTUSB_URB_FREE_BUFFER(pObj->pUsb_Dev, BufferSize, \
Context->TransferBuffer, \
Context->data_dma); \
Context->TransferBuffer = NULL; }
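/*
 * Illustrative expansion: LM_USB_ALLOC(pObj, pNullContext,
 * struct rt_tx_buffer *, sizeof(struct rt_tx_buffer), Status,
 * msgA, out3, msgB, out4) first allocates the URB (jumping to out3 on
 * failure), then the DMA-coherent transfer buffer via
 * RTUSB_URB_ALLOC_BUFFER (jumping to out4 on failure), setting Status
 * to NDIS_STATUS_RESOURCES on either error path.
 */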
u8 i, acidx;
int Status = NDIS_STATUS_SUCCESS;
struct rt_tx_context *pNullContext = &(pAd->NullContext);
struct rt_tx_context *pPsPollContext = &(pAd->PsPollContext);
struct rt_tx_context *pRTSContext = &(pAd->RTSContext);
struct rt_tx_context *pMLMEContext = NULL;
/* struct rt_ht_tx_context *pHTTXContext = NULL; */
struct os_cookie *pObj = (struct os_cookie *)pAd->OS_Cookie;
void *RingBaseVa;
/* struct rt_rtmp_tx_ring *pTxRing; */
struct rt_rtmp_mgmt_ring *pMgmtRing;
DBGPRINT(RT_DEBUG_TRACE, ("--> NICInitTransmit\n"));
pObj = pObj;
/* Init 4 set of Tx parameters */
for (acidx = 0; acidx < NUM_OF_TX_RING; acidx++) {
/* Initialize all Transmit related queues */
InitializeQueueHeader(&pAd->TxSwQueue[acidx]);
/* Next local tx ring pointer waiting for bulk out */
pAd->NextBulkOutIndex[acidx] = acidx;
pAd->BulkOutPending[acidx] = FALSE; /* Bulk Out control flag */
/*pAd->DataBulkDoneIdx[acidx] = 0; */
}
/*pAd->NextMLMEIndex = 0; */
/*pAd->PushMgmtIndex = 0; */
/*pAd->PopMgmtIndex = 0; */
/*InterlockedExchange(&pAd->MgmtQueueSize, 0); */
/*InterlockedExchange(&pAd->TxCount, 0); */
/*pAd->PrioRingFirstIndex = 0; */
/*pAd->PrioRingTxCnt = 0; */
do {
/* */
/* TX_RING_SIZE, 4 ACs */
/* */
for (acidx = 0; acidx < 4; acidx++) {
struct rt_ht_tx_context *pHTTXContext = &(pAd->TxContext[acidx]);
NdisZeroMemory(pHTTXContext, sizeof(struct rt_ht_tx_context));
/*Allocate URB */
LM_USB_ALLOC(pObj, pHTTXContext, struct rt_httx_buffer *,
sizeof(struct rt_httx_buffer), Status,
("<-- ERROR in Alloc TX TxContext[%d] urb!\n",
acidx), done,
("<-- ERROR in Alloc TX TxContext[%d] struct rt_httx_buffer!\n",
acidx), out1);
NdisZeroMemory(pHTTXContext->TransferBuffer->
Aggregation, 4);
pHTTXContext->pAd = pAd;
pHTTXContext->pIrp = NULL;
pHTTXContext->IRPPending = FALSE;
pHTTXContext->NextBulkOutPosition = 0;
pHTTXContext->ENextBulkOutPosition = 0;
pHTTXContext->CurWritePosition = 0;
pHTTXContext->CurWriteRealPos = 0;
pHTTXContext->BulkOutSize = 0;
pHTTXContext->BulkOutPipeId = acidx;
pHTTXContext->bRingEmpty = TRUE;
pHTTXContext->bCopySavePad = FALSE;
pAd->BulkOutPending[acidx] = FALSE;
}
/* */
/* MGMT_RING_SIZE */
/* */
/* Allocate MGMT ring descriptor's memory */
pAd->MgmtDescRing.AllocSize =
MGMT_RING_SIZE * sizeof(struct rt_tx_context);
os_alloc_mem(pAd, (u8 **) (&pAd->MgmtDescRing.AllocVa),
pAd->MgmtDescRing.AllocSize);
if (pAd->MgmtDescRing.AllocVa == NULL) {
DBGPRINT_ERR(("Failed to allocate a big buffer for MgmtDescRing!\n"));
Status = NDIS_STATUS_RESOURCES;
goto out1;
}
NdisZeroMemory(pAd->MgmtDescRing.AllocVa,
pAd->MgmtDescRing.AllocSize);
RingBaseVa = pAd->MgmtDescRing.AllocVa;
/* Initialize MGMT Ring and associated buffer memory */
pMgmtRing = &pAd->MgmtRing;
for (i = 0; i < MGMT_RING_SIZE; i++) {
/* link the pre-allocated Mgmt buffer to MgmtRing.Cell */
pMgmtRing->Cell[i].AllocSize = sizeof(struct rt_tx_context);
pMgmtRing->Cell[i].AllocVa = RingBaseVa;
pMgmtRing->Cell[i].pNdisPacket = NULL;
pMgmtRing->Cell[i].pNextNdisPacket = NULL;
/*Allocate URB for MLMEContext */
pMLMEContext =
(struct rt_tx_context *)pAd->MgmtRing.Cell[i].AllocVa;
pMLMEContext->pUrb = RTUSB_ALLOC_URB(0);
if (pMLMEContext->pUrb == NULL) {
DBGPRINT(RT_DEBUG_ERROR,
("<-- ERROR in Alloc TX MLMEContext[%d] urb!\n",
i));
Status = NDIS_STATUS_RESOURCES;
goto out2;
}
pMLMEContext->pAd = pAd;
pMLMEContext->pIrp = NULL;
pMLMEContext->TransferBuffer = NULL;
pMLMEContext->InUse = FALSE;
pMLMEContext->IRPPending = FALSE;
pMLMEContext->bWaitingBulkOut = FALSE;
pMLMEContext->BulkOutSize = 0;
pMLMEContext->SelfIdx = i;
/* Offset to next ring descriptor address */
RingBaseVa = (u8 *)RingBaseVa + sizeof(struct rt_tx_context);
}
DBGPRINT(RT_DEBUG_TRACE,
("MGMT Ring: total %d entry allocated\n", i));
/*pAd->MgmtRing.TxSwFreeIdx = (MGMT_RING_SIZE - 1); */
pAd->MgmtRing.TxSwFreeIdx = MGMT_RING_SIZE;
pAd->MgmtRing.TxCpuIdx = 0;
pAd->MgmtRing.TxDmaIdx = 0;
/* */
/* BEACON_RING_SIZE */
/* */
for (i = 0; i < BEACON_RING_SIZE; i++) /* 2 */
{
struct rt_tx_context *pBeaconContext = &(pAd->BeaconContext[i]);
NdisZeroMemory(pBeaconContext, sizeof(struct rt_tx_context));
/*Allocate URB */
LM_USB_ALLOC(pObj, pBeaconContext, struct rt_tx_buffer *,
sizeof(struct rt_tx_buffer), Status,
("<-- ERROR in Alloc TX BeaconContext[%d] urb!\n",
i), out2,
("<-- ERROR in Alloc TX BeaconContext[%d] struct rt_tx_buffer!\n",
i), out3);
pBeaconContext->pAd = pAd;
pBeaconContext->pIrp = NULL;
pBeaconContext->InUse = FALSE;
pBeaconContext->IRPPending = FALSE;
}
/* */
/* NullContext */
/* */
NdisZeroMemory(pNullContext, sizeof(struct rt_tx_context));
/*Allocate URB */
LM_USB_ALLOC(pObj, pNullContext, struct rt_tx_buffer *, sizeof(struct rt_tx_buffer),
Status,
("<-- ERROR in Alloc TX NullContext urb!\n"),
out3,
("<-- ERROR in Alloc TX NullContext struct rt_tx_buffer!\n"),
out4);
pNullContext->pAd = pAd;
pNullContext->pIrp = NULL;
pNullContext->InUse = FALSE;
pNullContext->IRPPending = FALSE;
/* */
/* RTSContext */
/* */
NdisZeroMemory(pRTSContext, sizeof(struct rt_tx_context));
/*Allocate URB */
LM_USB_ALLOC(pObj, pRTSContext, struct rt_tx_buffer *, sizeof(struct rt_tx_buffer),
Status,
("<-- ERROR in Alloc TX RTSContext urb!\n"),
out4,
("<-- ERROR in Alloc TX RTSContext struct rt_tx_buffer!\n"),
out5);
pRTSContext->pAd = pAd;
pRTSContext->pIrp = NULL;
pRTSContext->InUse = FALSE;
pRTSContext->IRPPending = FALSE;
/* */
/* PsPollContext */
/* */
/*NdisZeroMemory(pPsPollContext, sizeof(struct rt_tx_context)); */
/*Allocate URB */
LM_USB_ALLOC(pObj, pPsPollContext, struct rt_tx_buffer *,
sizeof(struct rt_tx_buffer), Status,
("<-- ERROR in Alloc TX PsPollContext urb!\n"),
out5,
("<-- ERROR in Alloc TX PsPollContext struct rt_tx_buffer!\n"),
out6);
pPsPollContext->pAd = pAd;
pPsPollContext->pIrp = NULL;
pPsPollContext->InUse = FALSE;
pPsPollContext->IRPPending = FALSE;
pPsPollContext->bAggregatible = FALSE;
pPsPollContext->LastOne = TRUE;
} while (FALSE);
done:
DBGPRINT(RT_DEBUG_TRACE, ("<-- NICInitTransmit(Status=%d)\n", Status));
return Status;
/* --------------------------- ERROR HANDLE --------------------------- */
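/*
 * The labels below unwind in reverse order of allocation: jumping to
 * out<N> frees every resource whose allocation succeeded before the
 * failing stage, then falls through to the earlier labels.
 */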
out6:
LM_URB_FREE(pObj, pPsPollContext, sizeof(struct rt_tx_buffer));
out5:
LM_URB_FREE(pObj, pRTSContext, sizeof(struct rt_tx_buffer));
out4:
LM_URB_FREE(pObj, pNullContext, sizeof(struct rt_tx_buffer));
out3:
for (i = 0; i < BEACON_RING_SIZE; i++) {
struct rt_tx_context *pBeaconContext = &(pAd->BeaconContext[i]);
if (pBeaconContext)
LM_URB_FREE(pObj, pBeaconContext, sizeof(struct rt_tx_buffer));
}
out2:
if (pAd->MgmtDescRing.AllocVa) {
pMgmtRing = &pAd->MgmtRing;
for (i = 0; i < MGMT_RING_SIZE; i++) {
pMLMEContext =
(struct rt_tx_context *)pAd->MgmtRing.Cell[i].AllocVa;
if (pMLMEContext)
LM_URB_FREE(pObj, pMLMEContext,
sizeof(struct rt_tx_buffer));
}
os_free_mem(pAd, pAd->MgmtDescRing.AllocVa);
pAd->MgmtDescRing.AllocVa = NULL;
}
out1:
for (acidx = 0; acidx < 4; acidx++) {
struct rt_ht_tx_context *pTxContext = &(pAd->TxContext[acidx]);
if (pTxContext)
LM_URB_FREE(pObj, pTxContext, sizeof(struct rt_httx_buffer));
}
/* No other pre-allocated memory needs to be freed here. */
return Status;
}
/*
========================================================================
Routine Description:
Allocate DMA memory blocks for send, receive.
Arguments:
pAd Pointer to our adapter
Return Value:
NDIS_STATUS_SUCCESS
NDIS_STATUS_FAILURE
NDIS_STATUS_RESOURCES
Note:
========================================================================
*/
int RTMPAllocTxRxRingMemory(struct rt_rtmp_adapter *pAd)
{
/* struct rt_counter_802_11 pCounter = &pAd->WlanCounters; */
int Status;
int num;
DBGPRINT(RT_DEBUG_TRACE, ("--> RTMPAllocTxRxRingMemory\n"));
do {
/* Init the struct rt_cmdq and CmdQLock */
NdisAllocateSpinLock(&pAd->CmdQLock);
NdisAcquireSpinLock(&pAd->CmdQLock);
RTUSBInitializeCmdQ(&pAd->CmdQ);
NdisReleaseSpinLock(&pAd->CmdQLock);
NdisAllocateSpinLock(&pAd->MLMEBulkOutLock);
/*NdisAllocateSpinLock(&pAd->MLMEWaitQueueLock); */
NdisAllocateSpinLock(&pAd->BulkOutLock[0]);
NdisAllocateSpinLock(&pAd->BulkOutLock[1]);
NdisAllocateSpinLock(&pAd->BulkOutLock[2]);
NdisAllocateSpinLock(&pAd->BulkOutLock[3]);
NdisAllocateSpinLock(&pAd->BulkOutLock[4]);
NdisAllocateSpinLock(&pAd->BulkOutLock[5]);
NdisAllocateSpinLock(&pAd->BulkInLock);
for (num = 0; num < NUM_OF_TX_RING; num++) {
NdisAllocateSpinLock(&pAd->TxContextQueueLock[num]);
}
/* NdisAllocateSpinLock(&pAd->MemLock); // Not used in RT28XX */
/* NdisAllocateSpinLock(&pAd->MacTabLock); // init it in UserCfgInit() */
/* NdisAllocateSpinLock(&pAd->BATabLock); // init it in BATableInit() */
/* for(num=0; num<MAX_LEN_OF_BA_REC_TABLE; num++) */
/* { */
/* NdisAllocateSpinLock(&pAd->BATable.BARecEntry[num].RxReRingLock); */
/* } */
/* */
/* Init Mac Table */
/* */
/* MacTableInitialize(pAd); */
/* */
/* Init send data structures and related parameters */
/* */
Status = NICInitTransmit(pAd);
if (Status != NDIS_STATUS_SUCCESS)
break;
/* */
/* Init receive data structures and related parameters */
/* */
Status = NICInitRecv(pAd);
if (Status != NDIS_STATUS_SUCCESS)
break;
pAd->PendingIoCount = 1;
} while (FALSE);
NdisZeroMemory(&pAd->FragFrame, sizeof(struct rt_fragment_frame));
pAd->FragFrame.pFragPacket =
RTMP_AllocateFragPacketBuffer(pAd, RX_BUFFER_NORMSIZE);
if (pAd->FragFrame.pFragPacket == NULL) {
Status = NDIS_STATUS_RESOURCES;
}
DBGPRINT_S(Status,
("<-- RTMPAllocTxRxRingMemory, Status=%x\n", Status));
return Status;
}
/*
========================================================================
Routine Description:
Calls USB_InterfaceStop and frees the memory allocated for the URBs;
calls NdisMDeregisterDevice and frees the memory
allocated in VNetInitialize for the Adapter Object.
Arguments:
*pAd the raxx interface data pointer
Return Value:
None
Note:
========================================================================
*/
void RTMPFreeTxRxRingMemory(struct rt_rtmp_adapter *pAd)
{
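/*
 * Note: LM_URB_FREE is re-defined here without an #undef of the copy in
 * NICInitTransmit(); since the replacement text is identical, the C
 * preprocessor permits the redefinition.
 */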
#define LM_URB_FREE(pObj, Context, BufferSize) \
if (NULL != Context->pUrb) { \
RTUSB_UNLINK_URB(Context->pUrb); \
RTUSB_FREE_URB(Context->pUrb); \
Context->pUrb = NULL; } \
if (NULL != Context->TransferBuffer) { \
RTUSB_URB_FREE_BUFFER(pObj->pUsb_Dev, BufferSize, \
Context->TransferBuffer, \
Context->data_dma); \
Context->TransferBuffer = NULL; }
u32 i, acidx;
struct rt_tx_context *pNullContext = &pAd->NullContext;
struct rt_tx_context *pPsPollContext = &pAd->PsPollContext;
struct rt_tx_context *pRTSContext = &pAd->RTSContext;
/* struct rt_ht_tx_context *pHTTXContext; */
/*PRTMP_REORDERBUF pReorderBuf; */
struct os_cookie *pObj = (struct os_cookie *)pAd->OS_Cookie;
/* struct rt_rtmp_tx_ring *pTxRing; */
DBGPRINT(RT_DEBUG_ERROR, ("---> RTMPFreeTxRxRingMemory\n"));
pObj = pObj;
/* Free all resources for the RECEIVE buffer queue. */
for (i = 0; i < (RX_RING_SIZE); i++) {
struct rt_rx_context *pRxContext = &(pAd->RxContext[i]);
if (pRxContext)
LM_URB_FREE(pObj, pRxContext, MAX_RXBULK_SIZE);
}
/* Free PsPoll frame resource */
LM_URB_FREE(pObj, pPsPollContext, sizeof(struct rt_tx_buffer));
/* Free NULL frame resource */
LM_URB_FREE(pObj, pNullContext, sizeof(struct rt_tx_buffer));
/* Free RTS frame resource */
LM_URB_FREE(pObj, pRTSContext, sizeof(struct rt_tx_buffer));
/* Free beacon frame resource */
for (i = 0; i < BEACON_RING_SIZE; i++) {
struct rt_tx_context *pBeaconContext = &(pAd->BeaconContext[i]);
if (pBeaconContext)
LM_URB_FREE(pObj, pBeaconContext, sizeof(struct rt_tx_buffer));
}
/* Free mgmt frame resource */
for (i = 0; i < MGMT_RING_SIZE; i++) {
struct rt_tx_context *pMLMEContext =
(struct rt_tx_context *)pAd->MgmtRing.Cell[i].AllocVa;
/*LM_URB_FREE(pObj, pMLMEContext, sizeof(struct rt_tx_buffer)); */
if (NULL != pAd->MgmtRing.Cell[i].pNdisPacket) {
RTMPFreeNdisPacket(pAd,
pAd->MgmtRing.Cell[i].pNdisPacket);
pAd->MgmtRing.Cell[i].pNdisPacket = NULL;
pMLMEContext->TransferBuffer = NULL;
}
if (pMLMEContext) {
if (NULL != pMLMEContext->pUrb) {
RTUSB_UNLINK_URB(pMLMEContext->pUrb);
RTUSB_FREE_URB(pMLMEContext->pUrb);
pMLMEContext->pUrb = NULL;
}
}
}
if (pAd->MgmtDescRing.AllocVa)
os_free_mem(pAd, pAd->MgmtDescRing.AllocVa);
/* Free Tx frame resource */
for (acidx = 0; acidx < 4; acidx++) {
struct rt_ht_tx_context *pHTTXContext = &(pAd->TxContext[acidx]);
if (pHTTXContext)
LM_URB_FREE(pObj, pHTTXContext, sizeof(struct rt_httx_buffer));
}
if (pAd->FragFrame.pFragPacket)
RELEASE_NDIS_PACKET(pAd, pAd->FragFrame.pFragPacket,
NDIS_STATUS_SUCCESS);
for (i = 0; i < 6; i++) {
NdisFreeSpinLock(&pAd->BulkOutLock[i]);
}
NdisFreeSpinLock(&pAd->BulkInLock);
NdisFreeSpinLock(&pAd->MLMEBulkOutLock);
NdisFreeSpinLock(&pAd->CmdQLock);
/* Clear all pending bulk-out request flags. */
RTUSB_CLEAR_BULK_FLAG(pAd, 0xffffffff);
/* NdisFreeSpinLock(&pAd->MacTabLock); */
/* for(i=0; i<MAX_LEN_OF_BA_REC_TABLE; i++) */
/* { */
/* NdisFreeSpinLock(&pAd->BATable.BARecEntry[i].RxReRingLock); */
/* } */
DBGPRINT(RT_DEBUG_ERROR, ("<--- RTMPFreeTxRxRingMemory\n"));
}
/*
========================================================================
Routine Description:
Write WLAN MAC address to USB 2870.
Arguments:
pAd Pointer to our adapter
Return Value:
NDIS_STATUS_SUCCESS
Note:
========================================================================
*/
int RTUSBWriteHWMACAddress(struct rt_rtmp_adapter *pAd)
{
MAC_DW0_STRUC StaMacReg0;
MAC_DW1_STRUC StaMacReg1;
int Status = NDIS_STATUS_SUCCESS;
LARGE_INTEGER NOW;
/* read the current system time; NOW is not used further in this function */
RTMP_GetCurrentSystemTime(&NOW);
if (pAd->bLocalAdminMAC != TRUE) {
pAd->CurrentAddress[0] = pAd->PermanentAddress[0];
pAd->CurrentAddress[1] = pAd->PermanentAddress[1];
pAd->CurrentAddress[2] = pAd->PermanentAddress[2];
pAd->CurrentAddress[3] = pAd->PermanentAddress[3];
pAd->CurrentAddress[4] = pAd->PermanentAddress[4];
pAd->CurrentAddress[5] = pAd->PermanentAddress[5];
}
/* Write the new MAC address to MAC_ADDR_DW0/DW1 and let the ASIC know our MAC */
StaMacReg0.field.Byte0 = pAd->CurrentAddress[0];
StaMacReg0.field.Byte1 = pAd->CurrentAddress[1];
StaMacReg0.field.Byte2 = pAd->CurrentAddress[2];
StaMacReg0.field.Byte3 = pAd->CurrentAddress[3];
StaMacReg1.field.Byte4 = pAd->CurrentAddress[4];
StaMacReg1.field.Byte5 = pAd->CurrentAddress[5];
StaMacReg1.field.U2MeMask = 0xff;
DBGPRINT_RAW(RT_DEBUG_TRACE,
("Local MAC = %02x:%02x:%02x:%02x:%02x:%02x\n",
pAd->CurrentAddress[0], pAd->CurrentAddress[1],
pAd->CurrentAddress[2], pAd->CurrentAddress[3],
pAd->CurrentAddress[4], pAd->CurrentAddress[5]));
RTUSBWriteMACRegister(pAd, MAC_ADDR_DW0, StaMacReg0.word);
RTUSBWriteMACRegister(pAd, MAC_ADDR_DW1, StaMacReg1.word);
return Status;
}
/*
========================================================================
Routine Description:
Disable DMA.
Arguments:
*pAd the raxx interface data pointer
Return Value:
None
Note:
========================================================================
*/
void RT28XXDMADisable(struct rt_rtmp_adapter *pAd)
{
/* nothing to do for USB devices */
}
/*
========================================================================
Routine Description:
Enable DMA.
Arguments:
*pAd the raxx interface data pointer
Return Value:
None
Note:
========================================================================
*/
void RT28XXDMAEnable(struct rt_rtmp_adapter *pAd)
{
WPDMA_GLO_CFG_STRUC GloCfg;
USB_DMA_CFG_STRUC UsbCfg;
int i = 0;
RTMP_IO_WRITE32(pAd, MAC_SYS_CTRL, 0x4);
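/* Poll WPDMA_GLO_CFG until both TX and RX DMA go idle, up to ~200 ms */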
do {
RTMP_IO_READ32(pAd, WPDMA_GLO_CFG, &GloCfg.word);
if ((GloCfg.field.TxDMABusy == 0)
&& (GloCfg.field.RxDMABusy == 0))
break;
DBGPRINT(RT_DEBUG_TRACE, ("==> DMABusy\n"));
RTMPusecDelay(1000);
i++;
} while (i < 200);
RTMPusecDelay(50);
GloCfg.field.EnTXWriteBackDDONE = 1;
GloCfg.field.EnableRxDMA = 1;
GloCfg.field.EnableTxDMA = 1;
DBGPRINT(RT_DEBUG_TRACE,
("<== WRITE DMA offset 0x208 = 0x%x\n", GloCfg.word));
RTMP_IO_WRITE32(pAd, WPDMA_GLO_CFG, GloCfg.word);
UsbCfg.word = 0;
UsbCfg.field.phyclear = 0;
/* bulk-in aggregation needs 512-byte (USB 2.0) bulk-in packets; USB 1.1 (64-byte) does not use it */
if (pAd->BulkInMaxPacketSize == 512)
UsbCfg.field.RxBulkAggEn = 1;
/* for the last packet, PBF might use more than the limit, so subtract 3 (in 1KB units) to leave headroom */
UsbCfg.field.RxBulkAggLmt = (MAX_RXBULK_SIZE / 1024) - 3;
UsbCfg.field.RxBulkAggTOut = 0x80; /* 2006-10-18 */
UsbCfg.field.RxBulkEn = 1;
UsbCfg.field.TxBulkEn = 1;
RTUSBWriteMACRegister(pAd, USB_DMA_CFG, UsbCfg.word);
}
/********************************************************************
*
* 2870 Beacon Update Related functions.
*
********************************************************************/
/*
========================================================================
Routine Description:
Write Beacon buffer to Asic.
Arguments:
*pAd the raxx interface data pointer
Return Value:
None
Note:
========================================================================
*/
void RT28xx_UpdateBeaconToAsic(struct rt_rtmp_adapter *pAd,
int apidx,
unsigned long FrameLen, unsigned long UpdatePos)
{
u8 *pBeaconFrame = NULL;
u8 *ptr;
u32 i, padding;
struct rt_beacon_sync *pBeaconSync = pAd->CommonCfg.pBeaconSync;
u32 longValue;
/* u16 shortValue; */
BOOLEAN bBcnReq = FALSE;
u8 bcn_idx = 0;
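/*
 * NOTE: pBeaconFrame and bBcnReq are initialized above but never
 * assigned in this function, so the NULL check below always fires and
 * the routine returns after logging. Presumably the (AP-mode) code that
 * set them was removed when this driver was trimmed down.
 */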
if (pBeaconFrame == NULL) {
DBGPRINT(RT_DEBUG_ERROR, ("pBeaconFrame is NULL!\n"));
return;
}
if (pBeaconSync == NULL) {
DBGPRINT(RT_DEBUG_ERROR, ("pBeaconSync is NULL!\n"));
return;
}
/*if ((pAd->WdsTab.Mode == WDS_BRIDGE_MODE) || */
/* ((pAd->ApCfg.MBSSID[apidx].MSSIDDev == NULL) || !(pAd->ApCfg.MBSSID[apidx].MSSIDDev->flags & IFF_UP)) */
/* ) */
if (bBcnReq == FALSE) {
/* when the ra interface is down, do not send its beacon frame */
/* clear the beacon TXWI to all zeros */
for (i = 0; i < TXWI_SIZE; i += 4) {
RTMP_IO_WRITE32(pAd, pAd->BeaconOffset[bcn_idx] + i,
0x00);
}
pBeaconSync->BeaconBitMap &=
(~(BEACON_BITMAP_MASK & (1 << bcn_idx)));
NdisZeroMemory(pBeaconSync->BeaconTxWI[bcn_idx], TXWI_SIZE);
} else {
ptr = (u8 *)&pAd->BeaconTxWI;
if (NdisEqualMemory(pBeaconSync->BeaconTxWI[bcn_idx], &pAd->BeaconTxWI, TXWI_SIZE) == FALSE) { /* If BeaconTxWI changed, we need to rewrite the TxWI for the Beacon frames. */
pBeaconSync->BeaconBitMap &=
(~(BEACON_BITMAP_MASK & (1 << bcn_idx)));
NdisMoveMemory(pBeaconSync->BeaconTxWI[bcn_idx],
&pAd->BeaconTxWI, TXWI_SIZE);
}
if ((pBeaconSync->BeaconBitMap & (1 << bcn_idx)) !=
(1 << bcn_idx)) {
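/* Copy the TXWI into the beacon area, assembling each 32-bit word little-endian from bytes */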
for (i = 0; i < TXWI_SIZE; i += 4) /* 16-byte TXWI field */
{
longValue =
*ptr + (*(ptr + 1) << 8) +
(*(ptr + 2) << 16) + (*(ptr + 3) << 24);
RTMP_IO_WRITE32(pAd,
pAd->BeaconOffset[bcn_idx] + i,
longValue);
ptr += 4;
}
}
ptr = pBeaconSync->BeaconBuf[bcn_idx];
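/* Pad the frame to an even length: the compare/copy loop below moves 2 bytes at a time */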
padding = (FrameLen & 0x01);
NdisZeroMemory((u8 *)(pBeaconFrame + FrameLen), padding);
FrameLen += padding;
for (i = 0; i < FrameLen /*HW_BEACON_OFFSET */ ; i += 2) {
if (NdisEqualMemory(ptr, pBeaconFrame, 2) == FALSE) {
NdisMoveMemory(ptr, pBeaconFrame, 2);
/*shortValue = *ptr + (*(ptr+1)<<8); */
/*RTMP_IO_WRITE8(pAd, pAd->BeaconOffset[bcn_idx] + TXWI_SIZE + i, shortValue); */
RTUSBMultiWrite(pAd,
pAd->BeaconOffset[bcn_idx] +
TXWI_SIZE + i, ptr, 2);
}
ptr += 2;
pBeaconFrame += 2;
}
pBeaconSync->BeaconBitMap |= (1 << bcn_idx);
/* For AP interface, set the DtimBitOn so that we can send Bcast/Mcast frame out after this beacon frame. */
}
}
void RTUSBBssBeaconStop(struct rt_rtmp_adapter *pAd)
{
struct rt_beacon_sync *pBeaconSync;
int i, offset;
BOOLEAN Cancelled = TRUE;
pBeaconSync = pAd->CommonCfg.pBeaconSync;
if (pBeaconSync && pBeaconSync->EnableBeacon) {
int NumOfBcn = MAX_MESH_NUM;
RTMPCancelTimer(&pAd->CommonCfg.BeaconUpdateTimer, &Cancelled);
for (i = 0; i < NumOfBcn; i++) {
NdisZeroMemory(pBeaconSync->BeaconBuf[i],
HW_BEACON_OFFSET);
NdisZeroMemory(pBeaconSync->BeaconTxWI[i], TXWI_SIZE);
for (offset = 0; offset < HW_BEACON_OFFSET; offset += 4)
RTMP_IO_WRITE32(pAd,
pAd->BeaconOffset[i] + offset,
0x00);
pBeaconSync->CapabilityInfoLocationInBeacon[i] = 0;
pBeaconSync->TimIELocationInBeacon[i] = 0;
}
pBeaconSync->BeaconBitMap = 0;
pBeaconSync->DtimBitOn = 0;
}
}
void RTUSBBssBeaconStart(struct rt_rtmp_adapter *pAd)
{
int apidx;
struct rt_beacon_sync *pBeaconSync;
/* LARGE_INTEGER tsfTime, deltaTime; */
pBeaconSync = pAd->CommonCfg.pBeaconSync;
if (pBeaconSync && pBeaconSync->EnableBeacon) {
int NumOfBcn = MAX_MESH_NUM;
for (apidx = 0; apidx < NumOfBcn; apidx++) {
u8 CapabilityInfoLocationInBeacon = 0;
u8 TimIELocationInBeacon = 0;
NdisZeroMemory(pBeaconSync->BeaconBuf[apidx],
HW_BEACON_OFFSET);
pBeaconSync->CapabilityInfoLocationInBeacon[apidx] =
CapabilityInfoLocationInBeacon;
pBeaconSync->TimIELocationInBeacon[apidx] =
TimIELocationInBeacon;
NdisZeroMemory(pBeaconSync->BeaconTxWI[apidx],
TXWI_SIZE);
}
pBeaconSync->BeaconBitMap = 0;
pBeaconSync->DtimBitOn = 0;
pAd->CommonCfg.BeaconUpdateTimer.Repeat = TRUE;
pAd->CommonCfg.BeaconAdjust = 0;
pAd->CommonCfg.BeaconFactor =
0xffffffff / (pAd->CommonCfg.BeaconPeriod << 10);
pAd->CommonCfg.BeaconRemain =
(0xffffffff % (pAd->CommonCfg.BeaconPeriod << 10)) + 1;
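/*
 * BeaconFactor/BeaconRemain support the 32-bit TSF-modulo arithmetic in
 * BeaconUpdateExec(): with the beacon period p in microseconds
 * (BeaconPeriod TU << 10), BeaconRemain is effectively (2^32 mod p).
 * Worked example (illustrative): BeaconPeriod = 100 TU gives
 * p = 102400 us, BeaconFactor = 41943 and BeaconRemain = 4096.
 */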
DBGPRINT(RT_DEBUG_TRACE,
("RTUSBBssBeaconStart:BeaconFactor=%d, BeaconRemain=%d!\n",
pAd->CommonCfg.BeaconFactor,
pAd->CommonCfg.BeaconRemain));
RTMPSetTimer(&pAd->CommonCfg.BeaconUpdateTimer,
10 /*pAd->CommonCfg.BeaconPeriod */ );
}
}
void RTUSBBssBeaconInit(struct rt_rtmp_adapter *pAd)
{
struct rt_beacon_sync *pBeaconSync;
int i;
os_alloc_mem(pAd, (u8 **) (&pAd->CommonCfg.pBeaconSync),
sizeof(struct rt_beacon_sync));
/*NdisAllocMemory(pAd->CommonCfg.pBeaconSync, sizeof(struct rt_beacon_sync), MEM_ALLOC_FLAG); */
if (pAd->CommonCfg.pBeaconSync) {
pBeaconSync = pAd->CommonCfg.pBeaconSync;
NdisZeroMemory(pBeaconSync, sizeof(struct rt_beacon_sync));
for (i = 0; i < HW_BEACON_MAX_COUNT; i++) {
NdisZeroMemory(pBeaconSync->BeaconBuf[i],
HW_BEACON_OFFSET);
pBeaconSync->CapabilityInfoLocationInBeacon[i] = 0;
pBeaconSync->TimIELocationInBeacon[i] = 0;
NdisZeroMemory(pBeaconSync->BeaconTxWI[i], TXWI_SIZE);
}
pBeaconSync->BeaconBitMap = 0;
/*RTMPInitTimer(pAd, &pAd->CommonCfg.BeaconUpdateTimer, GET_TIMER_FUNCTION(BeaconUpdateExec), pAd, TRUE); */
pBeaconSync->EnableBeacon = TRUE;
}
}
void RTUSBBssBeaconExit(struct rt_rtmp_adapter *pAd)
{
struct rt_beacon_sync *pBeaconSync;
BOOLEAN Cancelled = TRUE;
int i;
if (pAd->CommonCfg.pBeaconSync) {
pBeaconSync = pAd->CommonCfg.pBeaconSync;
pBeaconSync->EnableBeacon = FALSE;
RTMPCancelTimer(&pAd->CommonCfg.BeaconUpdateTimer, &Cancelled);
pBeaconSync->BeaconBitMap = 0;
for (i = 0; i < HW_BEACON_MAX_COUNT; i++) {
NdisZeroMemory(pBeaconSync->BeaconBuf[i],
HW_BEACON_OFFSET);
pBeaconSync->CapabilityInfoLocationInBeacon[i] = 0;
pBeaconSync->TimIELocationInBeacon[i] = 0;
NdisZeroMemory(pBeaconSync->BeaconTxWI[i], TXWI_SIZE);
}
os_free_mem(pAd, pAd->CommonCfg.pBeaconSync);
pAd->CommonCfg.pBeaconSync = NULL;
}
}
/*
========================================================================
Routine Description:
For devices working in AP mode that have no TBTT interrupt event, we need a mechanism
to update the beacon context in each beacon interval. Here we use a periodic timer
to simulate the TBTT interrupt and handle the beacon context update.
Arguments:
SystemSpecific1 - Not used.
FunctionContext - Pointer to our Adapter context.
SystemSpecific2 - Not used.
SystemSpecific3 - Not used.
Return Value:
None
========================================================================
*/
void BeaconUpdateExec(void *SystemSpecific1,
void *FunctionContext,
void *SystemSpecific2, void *SystemSpecific3)
{
struct rt_rtmp_adapter *pAd = (struct rt_rtmp_adapter *)FunctionContext;
LARGE_INTEGER tsfTime_a; /*, tsfTime_b, deltaTime_exp, deltaTime_ab; */
u32 delta, delta2MS, period2US, remain, remain_low, remain_high;
/* BOOLEAN positive; */
if (pAd->CommonCfg.IsUpdateBeacon == TRUE) {
ReSyncBeaconTime(pAd);
}
RTMP_IO_READ32(pAd, TSF_TIMER_DW0, &tsfTime_a.u.LowPart);
RTMP_IO_READ32(pAd, TSF_TIMER_DW1, &tsfTime_a.u.HighPart);
/*positive=getDeltaTime(tsfTime_a, expectedTime, &deltaTime_exp); */
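/*
 * Reduce the 64-bit TSF modulo the beacon period p using only 32-bit
 * math: tsf mod p == (High32 * (2^32 mod p) + (Low32 mod p)) mod p,
 * where BeaconRemain was precomputed as (2^32 mod p) in
 * RTUSBBssBeaconStart(). The intermediate product can wrap for large
 * High32 values; the driver accepts that inaccuracy.
 */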
period2US = (pAd->CommonCfg.BeaconPeriod << 10);
remain_high = pAd->CommonCfg.BeaconRemain * tsfTime_a.u.HighPart;
remain_low = tsfTime_a.u.LowPart % period2US;
remain = (remain_high + remain_low) % period2US;
delta = period2US - remain;
delta2MS = (delta >> 10);
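/*
 * delta2MS is the time in ms until the next TBTT. If it is near
 * (<= 150 ms), schedule the timer shortly (10 ms) after that TBTT and
 * flag IsUpdateBeacon so the next run re-syncs the beacon time;
 * otherwise just poll again in 100 ms without re-syncing.
 */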
if (delta2MS > 150) {
pAd->CommonCfg.BeaconUpdateTimer.TimerValue = 100;
pAd->CommonCfg.IsUpdateBeacon = FALSE;
} else {
pAd->CommonCfg.BeaconUpdateTimer.TimerValue = delta2MS + 10;
pAd->CommonCfg.IsUpdateBeacon = TRUE;
}
}
/********************************************************************
*
* 2870 Radio on/off Related functions.
*
********************************************************************/
void RT28xxUsbMlmeRadioOn(struct rt_rtmp_adapter *pAd)
{
struct rt_rtmp_chip_op *pChipOps = &pAd->chipOps;
DBGPRINT(RT_DEBUG_TRACE, ("RT28xxUsbMlmeRadioOn()\n"));
if (!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_RADIO_OFF))
return;
AsicSendCommandToMcu(pAd, 0x31, 0xff, 0x00, 0x02);
RTMPusecDelay(10000);
/*NICResetFromError(pAd); */
/* Enable Tx/Rx */
RTMPEnableRxTx(pAd);
if (pChipOps->AsicReverseRfFromSleepMode)
pChipOps->AsicReverseRfFromSleepMode(pAd);
/* Clear Radio off flag */
RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_RADIO_OFF);
RTUSBBulkReceive(pAd);
/* Set LED */
RTMPSetLED(pAd, LED_RADIO_ON);
}
void RT28xxUsbMlmeRadioOFF(struct rt_rtmp_adapter *pAd)
{
WPDMA_GLO_CFG_STRUC GloCfg;
u32 Value, i;
DBGPRINT(RT_DEBUG_TRACE, ("RT28xxUsbMlmeRadioOFF()\n"));
if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_RADIO_OFF))
return;
/* Clear PMKID cache. */
pAd->StaCfg.SavedPMKNum = 0;
RTMPZeroMemory(pAd->StaCfg.SavedPMK, (PMKID_NO * sizeof(struct rt_bssid_info)));
/* Link down first if any association exists */
if (!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST)) {
if (INFRA_ON(pAd) || ADHOC_ON(pAd)) {
struct rt_mlme_disassoc_req DisReq;
struct rt_mlme_queue_elem *pMsgElem =
kmalloc(sizeof(struct rt_mlme_queue_elem),
MEM_ALLOC_FLAG);
if (pMsgElem) {
COPY_MAC_ADDR(&DisReq.Addr,
pAd->CommonCfg.Bssid);
DisReq.Reason = REASON_DISASSOC_STA_LEAVING;
pMsgElem->Machine = ASSOC_STATE_MACHINE;
pMsgElem->MsgType = MT2_MLME_DISASSOC_REQ;
pMsgElem->MsgLen =
sizeof(struct rt_mlme_disassoc_req);
NdisMoveMemory(pMsgElem->Msg, &DisReq,
sizeof
(struct rt_mlme_disassoc_req));
MlmeDisassocReqAction(pAd, pMsgElem);
kfree(pMsgElem);
RTMPusecDelay(1000);
}
}
}
/* Set Radio off flag */
RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_RADIO_OFF);
/* Link down first if any association exists */
if (INFRA_ON(pAd) || ADHOC_ON(pAd))
LinkDown(pAd, FALSE);
RTMPusecDelay(10000);
/*========================================== */
/* Clean up old bss table */
BssTableInit(&pAd->ScanTab);
/* Set LED */
RTMPSetLED(pAd, LED_RADIO_OFF);
if (pAd->CommonCfg.BBPCurrentBW == BW_40) {
/* Must use the 40MHz central channel */
AsicTurnOffRFClk(pAd, pAd->CommonCfg.CentralChannel);
} else {
/* Must use the 20MHz channel */
AsicTurnOffRFClk(pAd, pAd->CommonCfg.Channel);
}
/* Disable Tx/Rx DMA */
RTUSBReadMACRegister(pAd, WPDMA_GLO_CFG, &GloCfg.word); /* disable DMA */
GloCfg.field.EnableTxDMA = 0;
GloCfg.field.EnableRxDMA = 0;
RTUSBWriteMACRegister(pAd, WPDMA_GLO_CFG, GloCfg.word); /* abort all TX rings */
/* Waiting for DMA idle */
i = 0;
do {
RTMP_IO_READ32(pAd, WPDMA_GLO_CFG, &GloCfg.word);
if ((GloCfg.field.TxDMABusy == 0)
&& (GloCfg.field.RxDMABusy == 0))
break;
RTMPusecDelay(1000);
} while (i++ < 100);
/* Disable MAC Tx/Rx */
RTMP_IO_READ32(pAd, MAC_SYS_CTRL, &Value);
Value &= (0xfffffff3);
RTMP_IO_WRITE32(pAd, MAC_SYS_CTRL, Value);
AsicSendCommandToMcu(pAd, 0x30, 0xff, 0xff, 0x02);
}
#endif /* RTMP_MAC_USB */
| gpl-2.0 |
invisiblek/kernel_808l | drivers/staging/comedi/drivers/ni_mio_common.c | 899 | 165825 | /*
comedi/drivers/ni_mio_common.c
Hardware driver for DAQ-STC based boards
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
Copyright (C) 2002-2006 Frank Mori Hess <fmhess@users.sourceforge.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
This file is meant to be included by another file, e.g.,
ni_atmio.c or ni_pcimio.c.
Interrupt support originally added by Truxton Fulton
<trux@truxton.com>
References (from ftp://ftp.natinst.com/support/manuals):
340747b.pdf AT-MIO E series Register Level Programmer Manual
341079b.pdf PCI E Series RLPM
340934b.pdf DAQ-STC reference manual
67xx and 611x registers (from http://www.ni.com/pdf/daq/us)
release_ni611x.pdf
release_ni67xx.pdf
Other possibly relevant info:
320517c.pdf User manual (obsolete)
320517f.pdf User manual (new)
320889a.pdf delete
320906c.pdf maximum signal ratings
321066a.pdf about 16x
321791a.pdf discontinuation of at-mio-16e-10 rev. c
321808a.pdf about at-mio-16e-10 rev P
321837a.pdf discontinuation of at-mio-16de-10 rev d
321838a.pdf about at-mio-16de-10 rev N
ISSUES:
- the interrupt routine needs to be cleaned up
2006-02-07: S-Series PCI-6143: Support has been added but is not
fully tested as yet. Terry Barnaby, BEAM Ltd.
*/
/* #define DEBUG_INTERRUPT */
/* #define DEBUG_STATUS_A */
/* #define DEBUG_STATUS_B */
#include <linux/interrupt.h>
#include <linux/sched.h>
#include "8255.h"
#include "mite.h"
#include "comedi_fc.h"
#ifndef MDPRINTK
#define MDPRINTK(format, args...)
#endif
/* A timeout count */
#define NI_TIMEOUT 1000
static const unsigned old_RTSI_clock_channel = 7;
/* Note: this table must match the ai_gain_* definitions */
static const short ni_gainlkup[][16] = {
[ai_gain_16] = {0, 1, 2, 3, 4, 5, 6, 7,
0x100, 0x101, 0x102, 0x103, 0x104, 0x105, 0x106, 0x107},
[ai_gain_8] = {1, 2, 4, 7, 0x101, 0x102, 0x104, 0x107},
[ai_gain_14] = {1, 2, 3, 4, 5, 6, 7,
0x101, 0x102, 0x103, 0x104, 0x105, 0x106, 0x107},
[ai_gain_4] = {0, 1, 4, 7},
[ai_gain_611x] = {0x00a, 0x00b, 0x001, 0x002,
0x003, 0x004, 0x005, 0x006},
[ai_gain_622x] = {0, 1, 4, 5},
[ai_gain_628x] = {1, 2, 3, 4, 5, 6, 7},
[ai_gain_6143] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
};
static const struct comedi_lrange range_ni_E_ai = { 16, {
RANGE(-10, 10),
RANGE(-5, 5),
RANGE(-2.5, 2.5),
RANGE(-1, 1),
RANGE(-0.5, 0.5),
RANGE(-0.25, 0.25),
RANGE(-0.1, 0.1),
RANGE(-0.05, 0.05),
RANGE(0, 20),
RANGE(0, 10),
RANGE(0, 5),
RANGE(0, 2),
RANGE(0, 1),
RANGE(0, 0.5),
RANGE(0, 0.2),
RANGE(0, 0.1),
}
};
static const struct comedi_lrange range_ni_E_ai_limited = { 8, {
RANGE(-10, 10),
RANGE(-5, 5),
RANGE(-1, 1),
RANGE(-0.1,
0.1),
RANGE(0, 10),
RANGE(0, 5),
RANGE(0, 1),
RANGE(0, 0.1),
}
};
static const struct comedi_lrange range_ni_E_ai_limited14 = { 14, {
RANGE(-10,
10),
RANGE(-5, 5),
RANGE(-2, 2),
RANGE(-1, 1),
RANGE(-0.5,
0.5),
RANGE(-0.2,
0.2),
RANGE(-0.1,
0.1),
RANGE(0, 10),
RANGE(0, 5),
RANGE(0, 2),
RANGE(0, 1),
RANGE(0,
0.5),
RANGE(0,
0.2),
RANGE(0,
0.1),
}
};
static const struct comedi_lrange range_ni_E_ai_bipolar4 = { 4, {
RANGE(-10, 10),
RANGE(-5, 5),
RANGE(-0.5,
0.5),
RANGE(-0.05,
0.05),
}
};
static const struct comedi_lrange range_ni_E_ai_611x = { 8, {
RANGE(-50, 50),
RANGE(-20, 20),
RANGE(-10, 10),
RANGE(-5, 5),
RANGE(-2, 2),
RANGE(-1, 1),
RANGE(-0.5, 0.5),
RANGE(-0.2, 0.2),
}
};
static const struct comedi_lrange range_ni_M_ai_622x = { 4, {
RANGE(-10, 10),
RANGE(-5, 5),
RANGE(-1, 1),
RANGE(-0.2, 0.2),
}
};
static const struct comedi_lrange range_ni_M_ai_628x = { 7, {
RANGE(-10, 10),
RANGE(-5, 5),
RANGE(-2, 2),
RANGE(-1, 1),
RANGE(-0.5, 0.5),
RANGE(-0.2, 0.2),
RANGE(-0.1, 0.1),
}
};
static const struct comedi_lrange range_ni_S_ai_6143 = { 1, {
RANGE(-5, +5),
}
};
static const struct comedi_lrange range_ni_E_ao_ext = { 4, {
RANGE(-10, 10),
RANGE(0, 10),
RANGE_ext(-1, 1),
RANGE_ext(0, 1),
}
};
static const struct comedi_lrange *const ni_range_lkup[] = {
[ai_gain_16] = &range_ni_E_ai,
[ai_gain_8] = &range_ni_E_ai_limited,
[ai_gain_14] = &range_ni_E_ai_limited14,
[ai_gain_4] = &range_ni_E_ai_bipolar4,
[ai_gain_611x] = &range_ni_E_ai_611x,
[ai_gain_622x] = &range_ni_M_ai_622x,
[ai_gain_628x] = &range_ni_M_ai_628x,
[ai_gain_6143] = &range_ni_S_ai_6143
};
static int ni_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static int ni_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static int ni_cdio_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd);
static int ni_cdio_cmd(struct comedi_device *dev, struct comedi_subdevice *s);
static int ni_cdio_cancel(struct comedi_device *dev,
struct comedi_subdevice *s);
static void handle_cdio_interrupt(struct comedi_device *dev);
static int ni_cdo_inttrig(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int trignum);
static int ni_serial_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static int ni_serial_hw_readwrite8(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned char data_out,
unsigned char *data_in);
static int ni_serial_sw_readwrite8(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned char data_out,
unsigned char *data_in);
static int ni_calib_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static int ni_calib_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static int ni_eeprom_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static int ni_m_series_eeprom_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data);
static int ni_pfi_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static int ni_pfi_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static unsigned ni_old_get_pfi_routing(struct comedi_device *dev,
unsigned chan);
static void ni_rtsi_init(struct comedi_device *dev);
static int ni_rtsi_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static int ni_rtsi_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static void caldac_setup(struct comedi_device *dev, struct comedi_subdevice *s);
static int ni_read_eeprom(struct comedi_device *dev, int addr);
#ifdef DEBUG_STATUS_A
static void ni_mio_print_status_a(int status);
#else
#define ni_mio_print_status_a(a)
#endif
#ifdef DEBUG_STATUS_B
static void ni_mio_print_status_b(int status);
#else
#define ni_mio_print_status_b(a)
#endif
static int ni_ai_reset(struct comedi_device *dev, struct comedi_subdevice *s);
#ifndef PCIDMA
static void ni_handle_fifo_half_full(struct comedi_device *dev);
static int ni_ao_fifo_half_empty(struct comedi_device *dev,
struct comedi_subdevice *s);
#endif
static void ni_handle_fifo_dregs(struct comedi_device *dev);
static int ni_ai_inttrig(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int trignum);
static void ni_load_channelgain_list(struct comedi_device *dev,
unsigned int n_chan, unsigned int *list);
static void shutdown_ai_command(struct comedi_device *dev);
static int ni_ao_inttrig(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int trignum);
static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s);
static int ni_8255_callback(int dir, int port, int data, unsigned long arg);
static int ni_gpct_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static int ni_gpct_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static int ni_gpct_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s);
static int ni_gpct_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd);
static int ni_gpct_cancel(struct comedi_device *dev,
struct comedi_subdevice *s);
static void handle_gpct_interrupt(struct comedi_device *dev,
unsigned short counter_index);
static int init_cs5529(struct comedi_device *dev);
static int cs5529_do_conversion(struct comedi_device *dev,
unsigned short *data);
static int cs5529_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
#ifdef NI_CS5529_DEBUG
static unsigned int cs5529_config_read(struct comedi_device *dev,
unsigned int reg_select_bits);
#endif
static void cs5529_config_write(struct comedi_device *dev, unsigned int value,
unsigned int reg_select_bits);
static int ni_m_series_pwm_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static int ni_6143_pwm_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static int ni_set_master_clock(struct comedi_device *dev, unsigned source,
unsigned period_ns);
static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status);
static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status);
enum aimodes {
AIMODE_NONE = 0,
AIMODE_HALF_FULL = 1,
AIMODE_SCAN = 2,
AIMODE_SAMPLE = 3,
};
enum ni_common_subdevices {
NI_AI_SUBDEV,
NI_AO_SUBDEV,
NI_DIO_SUBDEV,
NI_8255_DIO_SUBDEV,
NI_UNUSED_SUBDEV,
NI_CALIBRATION_SUBDEV,
NI_EEPROM_SUBDEV,
NI_PFI_DIO_SUBDEV,
NI_CS5529_CALIBRATION_SUBDEV,
NI_SERIAL_SUBDEV,
NI_RTSI_SUBDEV,
NI_GPCT0_SUBDEV,
NI_GPCT1_SUBDEV,
NI_FREQ_OUT_SUBDEV,
NI_NUM_SUBDEVICES
};
static inline unsigned NI_GPCT_SUBDEV(unsigned counter_index)
{
switch (counter_index) {
case 0:
return NI_GPCT0_SUBDEV;
break;
case 1:
return NI_GPCT1_SUBDEV;
break;
default:
break;
}
BUG();
return NI_GPCT0_SUBDEV;
}
enum timebase_nanoseconds {
TIMEBASE_1_NS = 50,
TIMEBASE_2_NS = 10000
};
#define SERIAL_DISABLED 0
#define SERIAL_600NS 600
#define SERIAL_1_2US 1200
#define SERIAL_10US 10000
static const int num_adc_stages_611x = 3;
static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
unsigned ai_mite_status);
static void handle_b_interrupt(struct comedi_device *dev, unsigned short status,
unsigned ao_mite_status);
static void get_last_sample_611x(struct comedi_device *dev);
static void get_last_sample_6143(struct comedi_device *dev);
static inline void ni_set_bitfield(struct comedi_device *dev, int reg,
unsigned bit_mask, unsigned bit_values)
{
unsigned long flags;
spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
switch (reg) {
case Interrupt_A_Enable_Register:
devpriv->int_a_enable_reg &= ~bit_mask;
devpriv->int_a_enable_reg |= bit_values & bit_mask;
devpriv->stc_writew(dev, devpriv->int_a_enable_reg,
Interrupt_A_Enable_Register);
break;
case Interrupt_B_Enable_Register:
devpriv->int_b_enable_reg &= ~bit_mask;
devpriv->int_b_enable_reg |= bit_values & bit_mask;
devpriv->stc_writew(dev, devpriv->int_b_enable_reg,
Interrupt_B_Enable_Register);
break;
case IO_Bidirection_Pin_Register:
devpriv->io_bidirection_pin_reg &= ~bit_mask;
devpriv->io_bidirection_pin_reg |= bit_values & bit_mask;
devpriv->stc_writew(dev, devpriv->io_bidirection_pin_reg,
IO_Bidirection_Pin_Register);
break;
case AI_AO_Select:
devpriv->ai_ao_select_reg &= ~bit_mask;
devpriv->ai_ao_select_reg |= bit_values & bit_mask;
ni_writeb(devpriv->ai_ao_select_reg, AI_AO_Select);
break;
case G0_G1_Select:
devpriv->g0_g1_select_reg &= ~bit_mask;
devpriv->g0_g1_select_reg |= bit_values & bit_mask;
ni_writeb(devpriv->g0_g1_select_reg, G0_G1_Select);
break;
default:
printk("Warning %s() called with invalid register\n", __func__);
printk("reg is %d\n", reg);
break;
}
mmiowb();
spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
}
#ifdef PCIDMA
static int ni_ai_drain_dma(struct comedi_device *dev);
/* DMA channel setup */
/* negative channel means no channel */
static inline void ni_set_ai_dma_channel(struct comedi_device *dev, int channel)
{
unsigned bitfield;
if (channel >= 0) {
bitfield =
(ni_stc_dma_channel_select_bitfield(channel) <<
AI_DMA_Select_Shift) & AI_DMA_Select_Mask;
} else {
bitfield = 0;
}
ni_set_bitfield(dev, AI_AO_Select, AI_DMA_Select_Mask, bitfield);
}
/* negative channel means no channel */
static inline void ni_set_ao_dma_channel(struct comedi_device *dev, int channel)
{
unsigned bitfield;
if (channel >= 0) {
bitfield =
(ni_stc_dma_channel_select_bitfield(channel) <<
AO_DMA_Select_Shift) & AO_DMA_Select_Mask;
} else {
bitfield = 0;
}
ni_set_bitfield(dev, AI_AO_Select, AO_DMA_Select_Mask, bitfield);
}
/* negative mite_channel means no channel */
static inline void ni_set_gpct_dma_channel(struct comedi_device *dev,
unsigned gpct_index,
int mite_channel)
{
unsigned bitfield;
if (mite_channel >= 0) {
bitfield = GPCT_DMA_Select_Bits(gpct_index, mite_channel);
} else {
bitfield = 0;
}
ni_set_bitfield(dev, G0_G1_Select, GPCT_DMA_Select_Mask(gpct_index),
bitfield);
}
/* negative mite_channel means no channel */
static inline void ni_set_cdo_dma_channel(struct comedi_device *dev,
int mite_channel)
{
unsigned long flags;
spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
devpriv->cdio_dma_select_reg &= ~CDO_DMA_Select_Mask;
if (mite_channel >= 0) {
/*XXX just guessing ni_stc_dma_channel_select_bitfield() returns the right bits,
under the assumption the cdio dma selection works just like ai/ao/gpct.
Definitely works for dma channels 0 and 1. */
devpriv->cdio_dma_select_reg |=
(ni_stc_dma_channel_select_bitfield(mite_channel) <<
CDO_DMA_Select_Shift) & CDO_DMA_Select_Mask;
}
ni_writeb(devpriv->cdio_dma_select_reg, M_Offset_CDIO_DMA_Select);
mmiowb();
spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
}
static int ni_request_ai_mite_channel(struct comedi_device *dev)
{
unsigned long flags;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
BUG_ON(devpriv->ai_mite_chan);
devpriv->ai_mite_chan =
mite_request_channel(devpriv->mite, devpriv->ai_mite_ring);
if (devpriv->ai_mite_chan == NULL) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
comedi_error(dev,
"failed to reserve mite dma channel for analog input.");
return -EBUSY;
}
devpriv->ai_mite_chan->dir = COMEDI_INPUT;
ni_set_ai_dma_channel(dev, devpriv->ai_mite_chan->channel);
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return 0;
}
static int ni_request_ao_mite_channel(struct comedi_device *dev)
{
unsigned long flags;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
BUG_ON(devpriv->ao_mite_chan);
devpriv->ao_mite_chan =
mite_request_channel(devpriv->mite, devpriv->ao_mite_ring);
if (devpriv->ao_mite_chan == NULL) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
comedi_error(dev,
"failed to reserve mite dma channel for analog outut.");
return -EBUSY;
}
devpriv->ao_mite_chan->dir = COMEDI_OUTPUT;
ni_set_ao_dma_channel(dev, devpriv->ao_mite_chan->channel);
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return 0;
}
static int ni_request_gpct_mite_channel(struct comedi_device *dev,
unsigned gpct_index,
enum comedi_io_direction direction)
{
unsigned long flags;
struct mite_channel *mite_chan;
BUG_ON(gpct_index >= NUM_GPCT);
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
BUG_ON(devpriv->counter_dev->counters[gpct_index].mite_chan);
mite_chan =
mite_request_channel(devpriv->mite,
devpriv->gpct_mite_ring[gpct_index]);
if (mite_chan == NULL) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
comedi_error(dev,
"failed to reserve mite dma channel for counter.");
return -EBUSY;
}
mite_chan->dir = direction;
ni_tio_set_mite_channel(&devpriv->counter_dev->counters[gpct_index],
mite_chan);
ni_set_gpct_dma_channel(dev, gpct_index, mite_chan->channel);
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return 0;
}
#endif /* PCIDMA */
static int ni_request_cdo_mite_channel(struct comedi_device *dev)
{
#ifdef PCIDMA
unsigned long flags;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
BUG_ON(devpriv->cdo_mite_chan);
devpriv->cdo_mite_chan =
mite_request_channel(devpriv->mite, devpriv->cdo_mite_ring);
if (devpriv->cdo_mite_chan == NULL) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
comedi_error(dev,
"failed to reserve mite dma channel for correlated digital outut.");
return -EBUSY;
}
devpriv->cdo_mite_chan->dir = COMEDI_OUTPUT;
ni_set_cdo_dma_channel(dev, devpriv->cdo_mite_chan->channel);
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
#endif /* PCIDMA */
return 0;
}
static void ni_release_ai_mite_channel(struct comedi_device *dev)
{
#ifdef PCIDMA
unsigned long flags;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->ai_mite_chan) {
ni_set_ai_dma_channel(dev, -1);
mite_release_channel(devpriv->ai_mite_chan);
devpriv->ai_mite_chan = NULL;
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
#endif /* PCIDMA */
}
static void ni_release_ao_mite_channel(struct comedi_device *dev)
{
#ifdef PCIDMA
unsigned long flags;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->ao_mite_chan) {
ni_set_ao_dma_channel(dev, -1);
mite_release_channel(devpriv->ao_mite_chan);
devpriv->ao_mite_chan = NULL;
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
#endif /* PCIDMA */
}
void ni_release_gpct_mite_channel(struct comedi_device *dev,
unsigned gpct_index)
{
#ifdef PCIDMA
unsigned long flags;
BUG_ON(gpct_index >= NUM_GPCT);
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->counter_dev->counters[gpct_index].mite_chan) {
struct mite_channel *mite_chan =
devpriv->counter_dev->counters[gpct_index].mite_chan;
ni_set_gpct_dma_channel(dev, gpct_index, -1);
ni_tio_set_mite_channel(&devpriv->
counter_dev->counters[gpct_index],
NULL);
mite_release_channel(mite_chan);
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
#endif /* PCIDMA */
}
static void ni_release_cdo_mite_channel(struct comedi_device *dev)
{
#ifdef PCIDMA
unsigned long flags;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->cdo_mite_chan) {
ni_set_cdo_dma_channel(dev, -1);
mite_release_channel(devpriv->cdo_mite_chan);
devpriv->cdo_mite_chan = NULL;
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
#endif /* PCIDMA */
}
/* e-series boards use the second irq signals to generate dma requests for their counters */
#ifdef PCIDMA
static void ni_e_series_enable_second_irq(struct comedi_device *dev,
unsigned gpct_index, short enable)
{
if (boardtype.reg_type & ni_reg_m_series_mask)
return;
switch (gpct_index) {
case 0:
if (enable) {
devpriv->stc_writew(dev, G0_Gate_Second_Irq_Enable,
Second_IRQ_A_Enable_Register);
} else {
devpriv->stc_writew(dev, 0,
Second_IRQ_A_Enable_Register);
}
break;
case 1:
if (enable) {
devpriv->stc_writew(dev, G1_Gate_Second_Irq_Enable,
Second_IRQ_B_Enable_Register);
} else {
devpriv->stc_writew(dev, 0,
Second_IRQ_B_Enable_Register);
}
break;
default:
BUG();
break;
}
}
#endif /* PCIDMA */
static void ni_clear_ai_fifo(struct comedi_device *dev)
{
if (boardtype.reg_type == ni_reg_6143) {
/* Flush the 6143 data FIFO */
ni_writel(0x10, AIFIFO_Control_6143); /* Flush fifo */
ni_writel(0x00, AIFIFO_Control_6143); /* Flush fifo */
while (ni_readl(AIFIFO_Status_6143) & 0x10) ; /* Wait for complete */
} else {
devpriv->stc_writew(dev, 1, ADC_FIFO_Clear);
if (boardtype.reg_type == ni_reg_625x) {
ni_writeb(0, M_Offset_Static_AI_Control(0));
ni_writeb(1, M_Offset_Static_AI_Control(0));
#if 0
/* the NI example code does 3 convert pulses for 625x boards,
but that appears to be wrong in practice. */
devpriv->stc_writew(dev, AI_CONVERT_Pulse,
AI_Command_1_Register);
devpriv->stc_writew(dev, AI_CONVERT_Pulse,
AI_Command_1_Register);
devpriv->stc_writew(dev, AI_CONVERT_Pulse,
AI_Command_1_Register);
#endif
}
}
}
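/* 32-bit access to a pair of 16-bit STC registers: high word at reg, low word at reg + 1 */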
static void win_out2(struct comedi_device *dev, uint32_t data, int reg)
{
devpriv->stc_writew(dev, data >> 16, reg);
devpriv->stc_writew(dev, data & 0xffff, reg + 1);
}
static uint32_t win_in2(struct comedi_device *dev, int reg)
{
uint32_t bits;
bits = devpriv->stc_readw(dev, reg) << 16;
bits |= devpriv->stc_readw(dev, reg + 1);
return bits;
}
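/*
 * The 611x AO registers are reached through an address/data window pair:
 * write the target address to AO_Window_Address_611x, then transfer the
 * data through AO_Window_Data_611x, all under window_lock.
 */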
#define ao_win_out(data, addr) ni_ao_win_outw(dev, data, addr)
static inline void ni_ao_win_outw(struct comedi_device *dev, uint16_t data,
int addr)
{
unsigned long flags;
spin_lock_irqsave(&devpriv->window_lock, flags);
ni_writew(addr, AO_Window_Address_611x);
ni_writew(data, AO_Window_Data_611x);
spin_unlock_irqrestore(&devpriv->window_lock, flags);
}
static inline void ni_ao_win_outl(struct comedi_device *dev, uint32_t data,
int addr)
{
unsigned long flags;
spin_lock_irqsave(&devpriv->window_lock, flags);
ni_writew(addr, AO_Window_Address_611x);
ni_writel(data, AO_Window_Data_611x);
spin_unlock_irqrestore(&devpriv->window_lock, flags);
}
static inline unsigned short ni_ao_win_inw(struct comedi_device *dev, int addr)
{
unsigned long flags;
unsigned short data;
spin_lock_irqsave(&devpriv->window_lock, flags);
ni_writew(addr, AO_Window_Address_611x);
data = ni_readw(AO_Window_Data_611x);
spin_unlock_irqrestore(&devpriv->window_lock, flags);
return data;
}
/* ni_set_bits() allows different parts of the ni_mio_common driver to
* share registers (such as Interrupt_A_Register) without interfering with
* each other.
*
* NOTE: the switch/case statements are optimized out for a constant argument,
* so this is actually quite fast. If you must wrap another function around
* this, make it inline to avoid a large speed penalty.
*
* value should only be 1 or 0.
*/
static inline void ni_set_bits(struct comedi_device *dev, int reg,
unsigned bits, unsigned value)
{
unsigned bit_values;
if (value)
bit_values = bits;
else
bit_values = 0;
ni_set_bitfield(dev, reg, bits, bit_values);
}
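/*
 * Usage sketch: this mirrors the real call in handle_b_interrupt()
 * further below, which disables the AO FIFO/error interrupts on a
 * buffer underrun.
 */
#if 0
ni_set_bits(dev, Interrupt_B_Enable_Register,
AO_FIFO_Interrupt_Enable | AO_Error_Interrupt_Enable, 0);
#endif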
static irqreturn_t ni_E_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
unsigned short a_status;
unsigned short b_status;
unsigned int ai_mite_status = 0;
unsigned int ao_mite_status = 0;
unsigned long flags;
#ifdef PCIDMA
struct mite_struct *mite = devpriv->mite;
#endif
if (dev->attached == 0)
return IRQ_NONE;
smp_mb(); /* make sure dev->attached is checked before handler does anything else. */
/* lock to avoid race with comedi_poll */
spin_lock_irqsave(&dev->spinlock, flags);
a_status = devpriv->stc_readw(dev, AI_Status_1_Register);
b_status = devpriv->stc_readw(dev, AO_Status_1_Register);
#ifdef PCIDMA
if (mite) {
unsigned long flags_too;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags_too);
if (devpriv->ai_mite_chan) {
ai_mite_status = mite_get_status(devpriv->ai_mite_chan);
if (ai_mite_status & CHSR_LINKC)
writel(CHOR_CLRLC,
devpriv->mite->mite_io_addr +
MITE_CHOR(devpriv->
ai_mite_chan->channel));
}
if (devpriv->ao_mite_chan) {
ao_mite_status = mite_get_status(devpriv->ao_mite_chan);
if (ao_mite_status & CHSR_LINKC)
writel(CHOR_CLRLC,
mite->mite_io_addr +
MITE_CHOR(devpriv->
ao_mite_chan->channel));
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags_too);
}
#endif
ack_a_interrupt(dev, a_status);
ack_b_interrupt(dev, b_status);
if ((a_status & Interrupt_A_St) || (ai_mite_status & CHSR_INT))
handle_a_interrupt(dev, a_status, ai_mite_status);
if ((b_status & Interrupt_B_St) || (ao_mite_status & CHSR_INT))
handle_b_interrupt(dev, b_status, ao_mite_status);
handle_gpct_interrupt(dev, 0);
handle_gpct_interrupt(dev, 1);
handle_cdio_interrupt(dev);
spin_unlock_irqrestore(&dev->spinlock, flags);
return IRQ_HANDLED;
}
#ifdef PCIDMA
static void ni_sync_ai_dma(struct comedi_device *dev)
{
struct comedi_subdevice *s = dev->subdevices + NI_AI_SUBDEV;
unsigned long flags;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->ai_mite_chan)
mite_sync_input_dma(devpriv->ai_mite_chan, s->async);
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
}
static void mite_handle_b_linkc(struct mite_struct *mite,
struct comedi_device *dev)
{
struct comedi_subdevice *s = dev->subdevices + NI_AO_SUBDEV;
unsigned long flags;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->ao_mite_chan) {
mite_sync_output_dma(devpriv->ao_mite_chan, s->async);
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
}
static int ni_ao_wait_for_dma_load(struct comedi_device *dev)
{
static const int timeout = 10000;
int i;
for (i = 0; i < timeout; i++) {
unsigned short b_status;
b_status = devpriv->stc_readw(dev, AO_Status_1_Register);
if (b_status & AO_FIFO_Half_Full_St)
break;
/* if we poll too often, the pci bus activity seems
to slow the dma transfer down */
udelay(10);
}
if (i == timeout) {
comedi_error(dev, "timed out waiting for dma load");
return -EPIPE;
}
return 0;
}
#endif /* PCIDMA */
static void ni_handle_eos(struct comedi_device *dev, struct comedi_subdevice *s)
{
if (devpriv->aimode == AIMODE_SCAN) {
#ifdef PCIDMA
static const int timeout = 10;
int i;
for (i = 0; i < timeout; i++) {
ni_sync_ai_dma(dev);
if ((s->async->events & COMEDI_CB_EOS))
break;
udelay(1);
}
#else
ni_handle_fifo_dregs(dev);
s->async->events |= COMEDI_CB_EOS;
#endif
}
/* handle special case of single scan using AI_End_On_End_Of_Scan */
if ((devpriv->ai_cmd2 & AI_End_On_End_Of_Scan)) {
shutdown_ai_command(dev);
}
}
static void shutdown_ai_command(struct comedi_device *dev)
{
struct comedi_subdevice *s = dev->subdevices + NI_AI_SUBDEV;
#ifdef PCIDMA
ni_ai_drain_dma(dev);
#endif
ni_handle_fifo_dregs(dev);
get_last_sample_611x(dev);
get_last_sample_6143(dev);
s->async->events |= COMEDI_CB_EOA;
}
static void ni_event(struct comedi_device *dev, struct comedi_subdevice *s)
{
if (s->async->events & (COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW |
COMEDI_CB_EOA)) {
switch (s - dev->subdevices) {
case NI_AI_SUBDEV:
ni_ai_reset(dev, s);
break;
case NI_AO_SUBDEV:
ni_ao_reset(dev, s);
break;
case NI_GPCT0_SUBDEV:
case NI_GPCT1_SUBDEV:
ni_gpct_cancel(dev, s);
break;
case NI_DIO_SUBDEV:
ni_cdio_cancel(dev, s);
break;
default:
break;
}
}
comedi_event(dev, s);
}
static void handle_gpct_interrupt(struct comedi_device *dev,
unsigned short counter_index)
{
#ifdef PCIDMA
struct comedi_subdevice *s =
dev->subdevices + NI_GPCT_SUBDEV(counter_index);
ni_tio_handle_interrupt(&devpriv->counter_dev->counters[counter_index],
s);
if (s->async->events)
ni_event(dev, s);
#endif
}
static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status)
{
unsigned short ack = 0;
if (a_status & AI_SC_TC_St) {
ack |= AI_SC_TC_Interrupt_Ack;
}
if (a_status & AI_START1_St) {
ack |= AI_START1_Interrupt_Ack;
}
if (a_status & AI_START_St) {
ack |= AI_START_Interrupt_Ack;
}
if (a_status & AI_STOP_St) {
/* not sure why we used to ack the START here also, instead of doing it independently. Frank Hess 2007-07-06 */
ack |= AI_STOP_Interrupt_Ack /*| AI_START_Interrupt_Ack */ ;
}
if (ack)
devpriv->stc_writew(dev, ack, Interrupt_A_Ack_Register);
}
static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
unsigned ai_mite_status)
{
struct comedi_subdevice *s = dev->subdevices + NI_AI_SUBDEV;
/* 67xx boards don't have ai subdevice, but their gpct0 might generate an a interrupt */
if (s->type == COMEDI_SUBD_UNUSED)
return;
#ifdef DEBUG_INTERRUPT
printk
("ni_mio_common: interrupt: a_status=%04x ai_mite_status=%08x\n",
status, ai_mite_status);
ni_mio_print_status_a(status);
#endif
#ifdef PCIDMA
if (ai_mite_status & CHSR_LINKC) {
ni_sync_ai_dma(dev);
}
if (ai_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
CHSR_SABORT | CHSR_XFERR | CHSR_LxERR_mask)) {
printk
("unknown mite interrupt, ack! (ai_mite_status=%08x)\n",
ai_mite_status);
/* mite_print_chsr(ai_mite_status); */
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
/* disable_irq(dev->irq); */
}
#endif
/* test for all uncommon interrupt events at the same time */
if (status & (AI_Overrun_St | AI_Overflow_St | AI_SC_TC_Error_St |
AI_SC_TC_St | AI_START1_St)) {
if (status == 0xffff) {
printk
("ni_mio_common: a_status=0xffff. Card removed?\n");
/* we probably aren't even running a command now,
* so it's a good idea to be careful. */
if (comedi_get_subdevice_runflags(s) & SRF_RUNNING) {
s->async->events |=
COMEDI_CB_ERROR | COMEDI_CB_EOA;
ni_event(dev, s);
}
return;
}
if (status & (AI_Overrun_St | AI_Overflow_St |
AI_SC_TC_Error_St)) {
printk("ni_mio_common: ai error a_status=%04x\n",
status);
ni_mio_print_status_a(status);
shutdown_ai_command(dev);
s->async->events |= COMEDI_CB_ERROR;
if (status & (AI_Overrun_St | AI_Overflow_St))
s->async->events |= COMEDI_CB_OVERFLOW;
ni_event(dev, s);
return;
}
if (status & AI_SC_TC_St) {
#ifdef DEBUG_INTERRUPT
printk("ni_mio_common: SC_TC interrupt\n");
#endif
if (!devpriv->ai_continuous) {
shutdown_ai_command(dev);
}
}
}
#ifndef PCIDMA
if (status & AI_FIFO_Half_Full_St) {
int i;
static const int timeout = 10;
/* pcmcia cards (at least 6036) seem to stop producing interrupts if we
* fail to get the fifo less than half full, so loop to be sure. */
for (i = 0; i < timeout; ++i) {
ni_handle_fifo_half_full(dev);
if ((devpriv->stc_readw(dev,
AI_Status_1_Register) &
AI_FIFO_Half_Full_St) == 0)
break;
}
}
#endif /* !PCIDMA */
if ((status & AI_STOP_St)) {
ni_handle_eos(dev, s);
}
ni_event(dev, s);
#ifdef DEBUG_INTERRUPT
status = devpriv->stc_readw(dev, AI_Status_1_Register);
if (status & Interrupt_A_St) {
printk
("handle_a_interrupt: didn't clear interrupt? status=0x%x\n",
status);
}
#endif
}
static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status)
{
unsigned short ack = 0;
if (b_status & AO_BC_TC_St) {
ack |= AO_BC_TC_Interrupt_Ack;
}
if (b_status & AO_Overrun_St) {
ack |= AO_Error_Interrupt_Ack;
}
if (b_status & AO_START_St) {
ack |= AO_START_Interrupt_Ack;
}
if (b_status & AO_START1_St) {
ack |= AO_START1_Interrupt_Ack;
}
if (b_status & AO_UC_TC_St) {
ack |= AO_UC_TC_Interrupt_Ack;
}
if (b_status & AO_UI2_TC_St) {
ack |= AO_UI2_TC_Interrupt_Ack;
}
if (b_status & AO_UPDATE_St) {
ack |= AO_UPDATE_Interrupt_Ack;
}
if (ack)
devpriv->stc_writew(dev, ack, Interrupt_B_Ack_Register);
}
static void handle_b_interrupt(struct comedi_device *dev,
unsigned short b_status, unsigned ao_mite_status)
{
struct comedi_subdevice *s = dev->subdevices + NI_AO_SUBDEV;
/* unsigned short ack=0; */
#ifdef DEBUG_INTERRUPT
printk("ni_mio_common: interrupt: b_status=%04x m1_status=%08x\n",
b_status, ao_mite_status);
ni_mio_print_status_b(b_status);
#endif
#ifdef PCIDMA
/* Currently, mite.c requires us to handle LINKC */
if (ao_mite_status & CHSR_LINKC) {
mite_handle_b_linkc(devpriv->mite, dev);
}
if (ao_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
CHSR_SABORT | CHSR_XFERR | CHSR_LxERR_mask)) {
		printk("unknown mite interrupt, ack! (ao_mite_status=%08x)\n",
		       ao_mite_status);
/* mite_print_chsr(ao_mite_status); */
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
}
#endif
if (b_status == 0xffff)
return;
if (b_status & AO_Overrun_St) {
		printk("ni_mio_common: AO FIFO underrun status=0x%04x status2=0x%04x\n",
		       b_status, devpriv->stc_readw(dev, AO_Status_2_Register));
s->async->events |= COMEDI_CB_OVERFLOW;
}
if (b_status & AO_BC_TC_St) {
		MDPRINTK("ni_mio_common: AO BC_TC status=0x%04x status2=0x%04x\n",
			 b_status, devpriv->stc_readw(dev, AO_Status_2_Register));
s->async->events |= COMEDI_CB_EOA;
}
#ifndef PCIDMA
if (b_status & AO_FIFO_Request_St) {
int ret;
ret = ni_ao_fifo_half_empty(dev, s);
if (!ret) {
printk("ni_mio_common: AO buffer underrun\n");
ni_set_bits(dev, Interrupt_B_Enable_Register,
AO_FIFO_Interrupt_Enable |
AO_Error_Interrupt_Enable, 0);
s->async->events |= COMEDI_CB_OVERFLOW;
}
}
#endif
ni_event(dev, s);
}
#ifdef DEBUG_STATUS_A
static const char *const status_a_strings[] = {
"passthru0", "fifo", "G0_gate", "G0_TC",
"stop", "start", "sc_tc", "start1",
"start2", "sc_tc_error", "overflow", "overrun",
"fifo_empty", "fifo_half_full", "fifo_full", "interrupt_a"
};
static void ni_mio_print_status_a(int status)
{
int i;
printk("A status:");
for (i = 15; i >= 0; i--) {
if (status & (1 << i)) {
printk(" %s", status_a_strings[i]);
}
}
printk("\n");
}
#endif
#ifdef DEBUG_STATUS_B
static const char *const status_b_strings[] = {
"passthru1", "fifo", "G1_gate", "G1_TC",
"UI2_TC", "UPDATE", "UC_TC", "BC_TC",
"start1", "overrun", "start", "bc_tc_error",
"fifo_empty", "fifo_half_full", "fifo_full", "interrupt_b"
};
static void ni_mio_print_status_b(int status)
{
int i;
printk("B status:");
for (i = 15; i >= 0; i--) {
if (status & (1 << i)) {
printk(" %s", status_b_strings[i]);
}
}
printk("\n");
}
#endif
#ifndef PCIDMA
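/* Copy up to n samples from the comedi buffer into the AO FIFO.  On
 * 6xxx-style boards two 16-bit samples are packed into each 32-bit FIFO
 * word (except the 6711, whose AO FIFO is only 16 bits wide); other
 * boards take one 16-bit sample per write.  Running out of buffered
 * data mid-load is reported as COMEDI_CB_OVERFLOW. */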
static void ni_ao_fifo_load(struct comedi_device *dev,
struct comedi_subdevice *s, int n)
{
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
int chan;
int i;
short d;
u32 packed_data;
int range;
int err = 1;
chan = async->cur_chan;
for (i = 0; i < n; i++) {
err &= comedi_buf_get(async, &d);
if (err == 0)
break;
range = CR_RANGE(cmd->chanlist[chan]);
if (boardtype.reg_type & ni_reg_6xxx_mask) {
packed_data = d & 0xffff;
/* 6711 only has 16 bit wide ao fifo */
if (boardtype.reg_type != ni_reg_6711) {
err &= comedi_buf_get(async, &d);
if (err == 0)
break;
chan++;
i++;
packed_data |= (d << 16) & 0xffff0000;
}
ni_writel(packed_data, DAC_FIFO_Data_611x);
} else {
ni_writew(d, DAC_FIFO_Data);
}
chan++;
chan %= cmd->chanlist_len;
}
async->cur_chan = chan;
if (err == 0) {
async->events |= COMEDI_CB_OVERFLOW;
}
}
/*
* There's a small problem if the FIFO gets really low and we
* don't have the data to fill it. Basically, if after we fill
* the FIFO with all the data available, the FIFO is _still_
* less than half full, we never clear the interrupt. If the
* IRQ is in edge mode, we never get another interrupt, because
* this one wasn't cleared. If in level mode, we get flooded
* with interrupts that we can't fulfill, because nothing ever
* gets put into the buffer.
*
* This kind of situation is recoverable, but it is easier to
* just pretend we had a FIFO underrun, since there is a good
* chance it will happen anyway. This is _not_ the case for
* RT code, as RT code might purposely be running close to the
* metal. Needs to be fixed eventually.
*/
static int ni_ao_fifo_half_empty(struct comedi_device *dev,
struct comedi_subdevice *s)
{
int n;
n = comedi_buf_read_n_available(s->async);
if (n == 0) {
s->async->events |= COMEDI_CB_OVERFLOW;
return 0;
}
n /= sizeof(short);
if (n > boardtype.ao_fifo_depth / 2)
n = boardtype.ao_fifo_depth / 2;
ni_ao_fifo_load(dev, s, n);
s->async->events |= COMEDI_CB_BLOCK;
return 1;
}
static int ni_ao_prep_fifo(struct comedi_device *dev,
struct comedi_subdevice *s)
{
int n;
/* reset fifo */
devpriv->stc_writew(dev, 1, DAC_FIFO_Clear);
if (boardtype.reg_type & ni_reg_6xxx_mask)
ni_ao_win_outl(dev, 0x6, AO_FIFO_Offset_Load_611x);
/* load some data */
n = comedi_buf_read_n_available(s->async);
if (n == 0)
return 0;
n /= sizeof(short);
if (n > boardtype.ao_fifo_depth)
n = boardtype.ao_fifo_depth;
ni_ao_fifo_load(dev, s, n);
return n;
}
static void ni_ai_fifo_read(struct comedi_device *dev,
struct comedi_subdevice *s, int n)
{
struct comedi_async *async = s->async;
int i;
if (boardtype.reg_type == ni_reg_611x) {
short data[2];
u32 dl;
for (i = 0; i < n / 2; i++) {
dl = ni_readl(ADC_FIFO_Data_611x);
/* This may get the hi/lo data in the wrong order */
data[0] = (dl >> 16) & 0xffff;
data[1] = dl & 0xffff;
cfc_write_array_to_buffer(s, data, sizeof(data));
}
/* Check if there's a single sample stuck in the FIFO */
if (n % 2) {
dl = ni_readl(ADC_FIFO_Data_611x);
data[0] = dl & 0xffff;
cfc_write_to_buffer(s, data[0]);
}
} else if (boardtype.reg_type == ni_reg_6143) {
short data[2];
u32 dl;
		/* This just reads the FIFO assuming the data is present;
		 * no checks on the FIFO status are performed. */
for (i = 0; i < n / 2; i++) {
dl = ni_readl(AIFIFO_Data_6143);
data[0] = (dl >> 16) & 0xffff;
data[1] = dl & 0xffff;
cfc_write_array_to_buffer(s, data, sizeof(data));
}
if (n % 2) {
/* Assume there is a single sample stuck in the FIFO */
ni_writel(0x01, AIFIFO_Control_6143); /* Get stranded sample into FIFO */
dl = ni_readl(AIFIFO_Data_6143);
data[0] = (dl >> 16) & 0xffff;
cfc_write_to_buffer(s, data[0]);
}
} else {
if (n > sizeof(devpriv->ai_fifo_buffer) /
sizeof(devpriv->ai_fifo_buffer[0])) {
comedi_error(dev, "bug! ai_fifo_buffer too small");
async->events |= COMEDI_CB_ERROR;
return;
}
for (i = 0; i < n; i++) {
devpriv->ai_fifo_buffer[i] =
ni_readw(ADC_FIFO_Data_Register);
}
		cfc_write_array_to_buffer(s, devpriv->ai_fifo_buffer,
					  n * sizeof(devpriv->ai_fifo_buffer[0]));
}
}
static void ni_handle_fifo_half_full(struct comedi_device *dev)
{
int n;
struct comedi_subdevice *s = dev->subdevices + NI_AI_SUBDEV;
n = boardtype.ai_fifo_depth / 2;
ni_ai_fifo_read(dev, s, n);
}
#endif
#ifdef PCIDMA
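/* Busy-wait (up to timeout * 5 us, i.e. roughly 50 ms) for the AI FIFO
 * to empty and for the mite channel to flush its in-flight bytes, then
 * do a final ni_sync_ai_dma().  Returns -1 if the drain times out. */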
static int ni_ai_drain_dma(struct comedi_device *dev)
{
int i;
static const int timeout = 10000;
unsigned long flags;
int retval = 0;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->ai_mite_chan) {
for (i = 0; i < timeout; i++) {
			if ((devpriv->stc_readw(dev, AI_Status_1_Register) &
			     AI_FIFO_Empty_St) &&
			    mite_bytes_in_transit(devpriv->ai_mite_chan) == 0)
				break;
udelay(5);
}
if (i == timeout) {
printk("ni_mio_common: wait for dma drain timed out\n");
			printk("mite_bytes_in_transit=%i, AI_Status1_Register=0x%x\n",
			       mite_bytes_in_transit(devpriv->ai_mite_chan),
			       devpriv->stc_readw(dev, AI_Status_1_Register));
retval = -1;
}
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
ni_sync_ai_dma(dev);
return retval;
}
#endif
/* Empties the AI FIFO */
static void ni_handle_fifo_dregs(struct comedi_device *dev)
{
struct comedi_subdevice *s = dev->subdevices + NI_AI_SUBDEV;
short data[2];
u32 dl;
short fifo_empty;
int i;
if (boardtype.reg_type == ni_reg_611x) {
		while ((devpriv->stc_readw(dev, AI_Status_1_Register) &
			AI_FIFO_Empty_St) == 0) {
dl = ni_readl(ADC_FIFO_Data_611x);
/* This may get the hi/lo data in the wrong order */
data[0] = (dl >> 16);
data[1] = (dl & 0xffff);
cfc_write_array_to_buffer(s, data, sizeof(data));
}
} else if (boardtype.reg_type == ni_reg_6143) {
i = 0;
while (ni_readl(AIFIFO_Status_6143) & 0x04) {
dl = ni_readl(AIFIFO_Data_6143);
/* This may get the hi/lo data in the wrong order */
data[0] = (dl >> 16);
data[1] = (dl & 0xffff);
cfc_write_array_to_buffer(s, data, sizeof(data));
i += 2;
}
/* Check if stranded sample is present */
if (ni_readl(AIFIFO_Status_6143) & 0x01) {
ni_writel(0x01, AIFIFO_Control_6143); /* Get stranded sample into FIFO */
dl = ni_readl(AIFIFO_Data_6143);
data[0] = (dl >> 16) & 0xffff;
cfc_write_to_buffer(s, data[0]);
}
} else {
		fifo_empty = devpriv->stc_readw(dev, AI_Status_1_Register) &
			     AI_FIFO_Empty_St;
while (fifo_empty == 0) {
			for (i = 0;
			     i < sizeof(devpriv->ai_fifo_buffer) /
				 sizeof(devpriv->ai_fifo_buffer[0]); i++) {
				fifo_empty =
					devpriv->stc_readw(dev,
							   AI_Status_1_Register) &
					AI_FIFO_Empty_St;
				if (fifo_empty)
					break;
				devpriv->ai_fifo_buffer[i] =
					ni_readw(ADC_FIFO_Data_Register);
			}
			cfc_write_array_to_buffer(s, devpriv->ai_fifo_buffer,
						  i * sizeof(devpriv->ai_fifo_buffer[0]));
}
}
}
static void get_last_sample_611x(struct comedi_device *dev)
{
struct comedi_subdevice *s = dev->subdevices + NI_AI_SUBDEV;
short data;
u32 dl;
if (boardtype.reg_type != ni_reg_611x)
return;
/* Check if there's a single sample stuck in the FIFO */
if (ni_readb(XXX_Status) & 0x80) {
dl = ni_readl(ADC_FIFO_Data_611x);
data = (dl & 0xffff);
cfc_write_to_buffer(s, data);
}
}
static void get_last_sample_6143(struct comedi_device *dev)
{
struct comedi_subdevice *s = dev->subdevices + NI_AI_SUBDEV;
short data;
u32 dl;
if (boardtype.reg_type != ni_reg_6143)
return;
/* Check if there's a single sample stuck in the FIFO */
if (ni_readl(AIFIFO_Status_6143) & 0x01) {
ni_writel(0x01, AIFIFO_Control_6143); /* Get stranded sample into FIFO */
dl = ni_readl(AIFIFO_Data_6143);
/* This may get the hi/lo data in the wrong order */
data = (dl >> 16) & 0xffff;
cfc_write_to_buffer(s, data);
}
}
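/* comedi munge hook for AI samples: under PCIDMA the data arrives
 * little-endian and is first converted to host byte order; in all cases
 * the per-channel offset recorded when the channel/gain list was loaded
 * is added, which (as the "signbits" handling elsewhere suggests) maps
 * the raw bipolar readings into comedi's unsigned sample range. */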
static void ni_ai_munge(struct comedi_device *dev, struct comedi_subdevice *s,
void *data, unsigned int num_bytes,
unsigned int chan_index)
{
struct comedi_async *async = s->async;
unsigned int i;
unsigned int length = num_bytes / bytes_per_sample(s);
short *array = data;
unsigned int *larray = data;
for (i = 0; i < length; i++) {
#ifdef PCIDMA
if (s->subdev_flags & SDF_LSAMPL)
larray[i] = le32_to_cpu(larray[i]);
else
array[i] = le16_to_cpu(array[i]);
#endif
if (s->subdev_flags & SDF_LSAMPL)
larray[i] += devpriv->ai_offset[chan_index];
else
array[i] += devpriv->ai_offset[chan_index];
chan_index++;
chan_index %= async->cmd.chanlist_len;
}
}
#ifdef PCIDMA
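/* Set up and arm the mite DMA channel for AI acquisition: the entire
 * preallocated comedi buffer is write-allocated up front, and the
 * transfer width is chosen per board family (32-bit FIFO reads for
 * 611x/6143 and 628x, 16-bit for the rest). */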
static int ni_ai_setup_MITE_dma(struct comedi_device *dev)
{
struct comedi_subdevice *s = dev->subdevices + NI_AI_SUBDEV;
int retval;
unsigned long flags;
retval = ni_request_ai_mite_channel(dev);
if (retval)
return retval;
/* printk("comedi_debug: using mite channel %i for ai.\n", devpriv->ai_mite_chan->channel); */
/* write alloc the entire buffer */
comedi_buf_write_alloc(s->async, s->async->prealloc_bufsz);
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->ai_mite_chan == NULL) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return -EIO;
}
switch (boardtype.reg_type) {
case ni_reg_611x:
case ni_reg_6143:
mite_prep_dma(devpriv->ai_mite_chan, 32, 16);
break;
case ni_reg_628x:
mite_prep_dma(devpriv->ai_mite_chan, 32, 32);
break;
default:
mite_prep_dma(devpriv->ai_mite_chan, 16, 16);
break;
	}
	/* start the MITE */
mite_dma_arm(devpriv->ai_mite_chan);
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return 0;
}
static int ni_ao_setup_MITE_dma(struct comedi_device *dev)
{
struct comedi_subdevice *s = dev->subdevices + NI_AO_SUBDEV;
int retval;
unsigned long flags;
retval = ni_request_ao_mite_channel(dev);
if (retval)
return retval;
/* read alloc the entire buffer */
comedi_buf_read_alloc(s->async, s->async->prealloc_bufsz);
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->ao_mite_chan) {
if (boardtype.reg_type & (ni_reg_611x | ni_reg_6713)) {
mite_prep_dma(devpriv->ao_mite_chan, 32, 32);
} else {
/* doing 32 instead of 16 bit wide transfers from memory
makes the mite do 32 bit pci transfers, doubling pci bandwidth. */
mite_prep_dma(devpriv->ao_mite_chan, 16, 32);
}
mite_dma_arm(devpriv->ao_mite_chan);
} else
retval = -EIO;
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return retval;
}
#endif /* PCIDMA */
/*
 * Used for both the cancel ioctl and board initialization.
 * This is pretty harsh for a cancel, but it works...
 */
static int ni_ai_reset(struct comedi_device *dev, struct comedi_subdevice *s)
{
ni_release_ai_mite_channel(dev);
/* ai configuration */
devpriv->stc_writew(dev, AI_Configuration_Start | AI_Reset,
Joint_Reset_Register);
ni_set_bits(dev, Interrupt_A_Enable_Register,
AI_SC_TC_Interrupt_Enable | AI_START1_Interrupt_Enable |
AI_START2_Interrupt_Enable | AI_START_Interrupt_Enable |
AI_STOP_Interrupt_Enable | AI_Error_Interrupt_Enable |
AI_FIFO_Interrupt_Enable, 0);
ni_clear_ai_fifo(dev);
if (boardtype.reg_type != ni_reg_6143)
ni_writeb(0, Misc_Command);
devpriv->stc_writew(dev, AI_Disarm, AI_Command_1_Register); /* reset pulses */
	devpriv->stc_writew(dev,
			    AI_Start_Stop | AI_Mode_1_Reserved
			    /* | AI_Trigger_Once */,
			    AI_Mode_1_Register);
devpriv->stc_writew(dev, 0x0000, AI_Mode_2_Register);
/* generate FIFO interrupts on non-empty */
devpriv->stc_writew(dev, (0 << 6) | 0x0000, AI_Mode_3_Register);
if (boardtype.reg_type == ni_reg_611x) {
devpriv->stc_writew(dev, AI_SHIFTIN_Pulse_Width |
AI_SOC_Polarity |
AI_LOCALMUX_CLK_Pulse_Width,
AI_Personal_Register);
devpriv->stc_writew(dev,
AI_SCAN_IN_PROG_Output_Select(3) |
AI_EXTMUX_CLK_Output_Select(0) |
AI_LOCALMUX_CLK_Output_Select(2) |
AI_SC_TC_Output_Select(3) |
AI_CONVERT_Output_Select
(AI_CONVERT_Output_Enable_High),
AI_Output_Control_Register);
} else if (boardtype.reg_type == ni_reg_6143) {
devpriv->stc_writew(dev, AI_SHIFTIN_Pulse_Width |
AI_SOC_Polarity |
AI_LOCALMUX_CLK_Pulse_Width,
AI_Personal_Register);
devpriv->stc_writew(dev,
AI_SCAN_IN_PROG_Output_Select(3) |
AI_EXTMUX_CLK_Output_Select(0) |
AI_LOCALMUX_CLK_Output_Select(2) |
AI_SC_TC_Output_Select(3) |
AI_CONVERT_Output_Select
(AI_CONVERT_Output_Enable_Low),
AI_Output_Control_Register);
} else {
unsigned ai_output_control_bits;
devpriv->stc_writew(dev, AI_SHIFTIN_Pulse_Width |
AI_SOC_Polarity |
AI_CONVERT_Pulse_Width |
AI_LOCALMUX_CLK_Pulse_Width,
AI_Personal_Register);
ai_output_control_bits =
AI_SCAN_IN_PROG_Output_Select(3) |
AI_EXTMUX_CLK_Output_Select(0) |
AI_LOCALMUX_CLK_Output_Select(2) |
AI_SC_TC_Output_Select(3);
if (boardtype.reg_type == ni_reg_622x)
ai_output_control_bits |=
AI_CONVERT_Output_Select
(AI_CONVERT_Output_Enable_High);
else
ai_output_control_bits |=
AI_CONVERT_Output_Select
(AI_CONVERT_Output_Enable_Low);
devpriv->stc_writew(dev, ai_output_control_bits,
AI_Output_Control_Register);
}
/* the following registers should not be changed, because there
* are no backup registers in devpriv. If you want to change
* any of these, add a backup register and other appropriate code:
* AI_Mode_1_Register
* AI_Mode_3_Register
* AI_Personal_Register
* AI_Output_Control_Register
*/
	/* clear interrupts */
	devpriv->stc_writew(dev, AI_SC_TC_Error_Confirm |
			    AI_START_Interrupt_Ack | AI_START2_Interrupt_Ack |
			    AI_START1_Interrupt_Ack | AI_SC_TC_Interrupt_Ack |
			    AI_Error_Interrupt_Ack | AI_STOP_Interrupt_Ack,
			    Interrupt_A_Ack_Register);
devpriv->stc_writew(dev, AI_Configuration_End, Joint_Reset_Register);
return 0;
}
static int ni_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
{
unsigned long flags = 0;
int count;
/* lock to avoid race with interrupt handler */
if (in_interrupt() == 0)
spin_lock_irqsave(&dev->spinlock, flags);
#ifndef PCIDMA
ni_handle_fifo_dregs(dev);
#else
ni_sync_ai_dma(dev);
#endif
count = s->async->buf_write_count - s->async->buf_read_count;
if (in_interrupt() == 0)
spin_unlock_irqrestore(&dev->spinlock, flags);
return count;
}
static int ni_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data)
{
int i, n;
const unsigned int mask = (1 << boardtype.adbits) - 1;
unsigned signbits;
unsigned short d;
unsigned long dl;
ni_load_channelgain_list(dev, 1, &insn->chanspec);
ni_clear_ai_fifo(dev);
signbits = devpriv->ai_offset[0];
if (boardtype.reg_type == ni_reg_611x) {
for (n = 0; n < num_adc_stages_611x; n++) {
devpriv->stc_writew(dev, AI_CONVERT_Pulse,
AI_Command_1_Register);
udelay(1);
}
for (n = 0; n < insn->n; n++) {
devpriv->stc_writew(dev, AI_CONVERT_Pulse,
AI_Command_1_Register);
/* The 611x has screwy 32-bit FIFOs. */
d = 0;
for (i = 0; i < NI_TIMEOUT; i++) {
if (ni_readb(XXX_Status) & 0x80) {
d = (ni_readl(ADC_FIFO_Data_611x) >> 16)
& 0xffff;
break;
}
if (!(devpriv->stc_readw(dev,
AI_Status_1_Register) &
AI_FIFO_Empty_St)) {
d = ni_readl(ADC_FIFO_Data_611x) &
0xffff;
break;
}
}
if (i == NI_TIMEOUT) {
				printk("ni_mio_common: timeout in 611x ni_ai_insn_read\n");
return -ETIME;
}
d += signbits;
data[n] = d;
}
} else if (boardtype.reg_type == ni_reg_6143) {
for (n = 0; n < insn->n; n++) {
devpriv->stc_writew(dev, AI_CONVERT_Pulse,
AI_Command_1_Register);
/* The 6143 has 32-bit FIFOs. You need to strobe a bit to move a single 16bit stranded sample into the FIFO */
dl = 0;
for (i = 0; i < NI_TIMEOUT; i++) {
if (ni_readl(AIFIFO_Status_6143) & 0x01) {
ni_writel(0x01, AIFIFO_Control_6143); /* Get stranded sample into FIFO */
dl = ni_readl(AIFIFO_Data_6143);
break;
}
}
if (i == NI_TIMEOUT) {
				printk("ni_mio_common: timeout in 6143 ni_ai_insn_read\n");
return -ETIME;
}
data[n] = (((dl >> 16) & 0xFFFF) + signbits) & 0xFFFF;
}
} else {
for (n = 0; n < insn->n; n++) {
devpriv->stc_writew(dev, AI_CONVERT_Pulse,
AI_Command_1_Register);
for (i = 0; i < NI_TIMEOUT; i++) {
if (!(devpriv->stc_readw(dev,
AI_Status_1_Register) &
AI_FIFO_Empty_St))
break;
}
if (i == NI_TIMEOUT) {
				printk("ni_mio_common: timeout in ni_ai_insn_read\n");
return -ETIME;
}
if (boardtype.reg_type & ni_reg_m_series_mask) {
data[n] =
ni_readl(M_Offset_AI_FIFO_Data) & mask;
} else {
d = ni_readw(ADC_FIFO_Data_Register);
d += signbits; /* subtle: needs to be short addition */
data[n] = d;
}
}
}
return insn->n;
}
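/* Prime a freshly written channel/gain list: issue a dummy conversion
 * (apparently needed to latch the first configuration-memory entry) and
 * throw the resulting sample away by clearing the AI FIFO. */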
static void ni_prime_channelgain_list(struct comedi_device *dev)
{
int i;
devpriv->stc_writew(dev, AI_CONVERT_Pulse, AI_Command_1_Register);
for (i = 0; i < NI_TIMEOUT; ++i) {
		if (!(devpriv->stc_readw(dev, AI_Status_1_Register) &
		      AI_FIFO_Empty_St)) {
devpriv->stc_writew(dev, 1, ADC_FIFO_Clear);
return;
}
udelay(1);
}
printk("ni_mio_common: timeout loading channel/gain list\n");
}
static void ni_m_series_load_channelgain_list(struct comedi_device *dev,
unsigned int n_chan,
unsigned int *list)
{
unsigned int chan, range, aref;
unsigned int i;
unsigned offset;
unsigned int dither;
unsigned range_code;
devpriv->stc_writew(dev, 1, Configuration_Memory_Clear);
/* offset = 1 << (boardtype.adbits - 1); */
if ((list[0] & CR_ALT_SOURCE)) {
unsigned bypass_bits;
chan = CR_CHAN(list[0]);
range = CR_RANGE(list[0]);
range_code = ni_gainlkup[boardtype.gainlkup][range];
dither = ((list[0] & CR_ALT_FILTER) != 0);
bypass_bits = MSeries_AI_Bypass_Config_FIFO_Bit;
bypass_bits |= chan;
bypass_bits |=
(devpriv->ai_calib_source) &
(MSeries_AI_Bypass_Cal_Sel_Pos_Mask |
MSeries_AI_Bypass_Cal_Sel_Neg_Mask |
MSeries_AI_Bypass_Mode_Mux_Mask |
MSeries_AO_Bypass_AO_Cal_Sel_Mask);
bypass_bits |= MSeries_AI_Bypass_Gain_Bits(range_code);
if (dither)
bypass_bits |= MSeries_AI_Bypass_Dither_Bit;
/* don't use 2's complement encoding */
bypass_bits |= MSeries_AI_Bypass_Polarity_Bit;
ni_writel(bypass_bits, M_Offset_AI_Config_FIFO_Bypass);
} else {
ni_writel(0, M_Offset_AI_Config_FIFO_Bypass);
}
offset = 0;
for (i = 0; i < n_chan; i++) {
unsigned config_bits = 0;
chan = CR_CHAN(list[i]);
aref = CR_AREF(list[i]);
range = CR_RANGE(list[i]);
dither = ((list[i] & CR_ALT_FILTER) != 0);
range_code = ni_gainlkup[boardtype.gainlkup][range];
devpriv->ai_offset[i] = offset;
switch (aref) {
case AREF_DIFF:
config_bits |=
MSeries_AI_Config_Channel_Type_Differential_Bits;
break;
case AREF_COMMON:
config_bits |=
MSeries_AI_Config_Channel_Type_Common_Ref_Bits;
break;
case AREF_GROUND:
config_bits |=
MSeries_AI_Config_Channel_Type_Ground_Ref_Bits;
break;
case AREF_OTHER:
break;
}
config_bits |= MSeries_AI_Config_Channel_Bits(chan);
config_bits |=
MSeries_AI_Config_Bank_Bits(boardtype.reg_type, chan);
config_bits |= MSeries_AI_Config_Gain_Bits(range_code);
if (i == n_chan - 1)
config_bits |= MSeries_AI_Config_Last_Channel_Bit;
if (dither)
config_bits |= MSeries_AI_Config_Dither_Bit;
/* don't use 2's complement encoding */
config_bits |= MSeries_AI_Config_Polarity_Bit;
ni_writew(config_bits, M_Offset_AI_Config_FIFO_Data);
}
ni_prime_channelgain_list(dev);
}
/*
* Notes on the 6110 and 6111:
 * These boards are slightly different from the rest of the series, since
* they have multiple A/D converters.
* From the driver side, the configuration memory is a
* little different.
* Configuration Memory Low:
* bits 15-9: same
* bit 8: unipolar/bipolar (should be 0 for bipolar)
* bits 0-3: gain. This is 4 bits instead of 3 for the other boards
* 1001 gain=0.1 (+/- 50)
* 1010 0.2
* 1011 0.1
* 0001 1
* 0010 2
* 0011 5
* 0100 10
* 0101 20
* 0110 50
* Configuration Memory High:
* bits 12-14: Channel Type
* 001 for differential
* 000 for calibration
* bit 11: coupling (this is not currently handled)
* 1 AC coupling
* 0 DC coupling
* bits 0-2: channel
* valid channels are 0-3
*/
static void ni_load_channelgain_list(struct comedi_device *dev,
unsigned int n_chan, unsigned int *list)
{
unsigned int chan, range, aref;
unsigned int i;
unsigned int hi, lo;
unsigned offset;
unsigned int dither;
if (boardtype.reg_type & ni_reg_m_series_mask) {
ni_m_series_load_channelgain_list(dev, n_chan, list);
return;
}
if (n_chan == 1 && (boardtype.reg_type != ni_reg_611x)
&& (boardtype.reg_type != ni_reg_6143)) {
if (devpriv->changain_state
&& devpriv->changain_spec == list[0]) {
/* ready to go. */
return;
}
devpriv->changain_state = 1;
devpriv->changain_spec = list[0];
} else {
devpriv->changain_state = 0;
}
devpriv->stc_writew(dev, 1, Configuration_Memory_Clear);
/* Set up Calibration mode if required */
if (boardtype.reg_type == ni_reg_6143) {
if ((list[0] & CR_ALT_SOURCE)
&& !devpriv->ai_calib_source_enabled) {
/* Strobe Relay enable bit */
ni_writew(devpriv->ai_calib_source |
Calibration_Channel_6143_RelayOn,
Calibration_Channel_6143);
ni_writew(devpriv->ai_calib_source,
Calibration_Channel_6143);
devpriv->ai_calib_source_enabled = 1;
msleep_interruptible(100); /* Allow relays to change */
} else if (!(list[0] & CR_ALT_SOURCE)
&& devpriv->ai_calib_source_enabled) {
/* Strobe Relay disable bit */
ni_writew(devpriv->ai_calib_source |
Calibration_Channel_6143_RelayOff,
Calibration_Channel_6143);
ni_writew(devpriv->ai_calib_source,
Calibration_Channel_6143);
devpriv->ai_calib_source_enabled = 0;
msleep_interruptible(100); /* Allow relays to change */
}
}
offset = 1 << (boardtype.adbits - 1);
for (i = 0; i < n_chan; i++) {
if ((boardtype.reg_type != ni_reg_6143)
&& (list[i] & CR_ALT_SOURCE)) {
chan = devpriv->ai_calib_source;
} else {
chan = CR_CHAN(list[i]);
}
aref = CR_AREF(list[i]);
range = CR_RANGE(list[i]);
dither = ((list[i] & CR_ALT_FILTER) != 0);
/* fix the external/internal range differences */
range = ni_gainlkup[boardtype.gainlkup][range];
if (boardtype.reg_type == ni_reg_611x)
devpriv->ai_offset[i] = offset;
else
devpriv->ai_offset[i] = (range & 0x100) ? 0 : offset;
hi = 0;
if ((list[i] & CR_ALT_SOURCE)) {
if (boardtype.reg_type == ni_reg_611x)
ni_writew(CR_CHAN(list[i]) & 0x0003,
Calibration_Channel_Select_611x);
} else {
if (boardtype.reg_type == ni_reg_611x)
aref = AREF_DIFF;
else if (boardtype.reg_type == ni_reg_6143)
aref = AREF_OTHER;
switch (aref) {
case AREF_DIFF:
hi |= AI_DIFFERENTIAL;
break;
case AREF_COMMON:
hi |= AI_COMMON;
break;
case AREF_GROUND:
hi |= AI_GROUND;
break;
case AREF_OTHER:
break;
}
}
hi |= AI_CONFIG_CHANNEL(chan);
ni_writew(hi, Configuration_Memory_High);
if (boardtype.reg_type != ni_reg_6143) {
lo = range;
if (i == n_chan - 1)
lo |= AI_LAST_CHANNEL;
if (dither)
lo |= AI_DITHER;
ni_writew(lo, Configuration_Memory_Low);
}
}
/* prime the channel/gain list */
if ((boardtype.reg_type != ni_reg_611x)
&& (boardtype.reg_type != ni_reg_6143)) {
ni_prime_channelgain_list(dev);
}
}
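/* Convert a period in nanoseconds to an STC timer divider; the register
 * wants divider - 1.  For example, with a 50 ns timebase (clock_ns == 50)
 * and nanosec == 1000, TRIG_ROUND_NEAREST computes (1000 + 25) / 50 = 20
 * and returns 19; ni_timer_to_ns(dev, 19) maps back to 50 * 20 = 1000 ns. */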
static int ni_ns_to_timer(const struct comedi_device *dev, unsigned nanosec,
int round_mode)
{
int divider;
switch (round_mode) {
case TRIG_ROUND_NEAREST:
default:
divider = (nanosec + devpriv->clock_ns / 2) / devpriv->clock_ns;
break;
case TRIG_ROUND_DOWN:
divider = (nanosec) / devpriv->clock_ns;
break;
case TRIG_ROUND_UP:
divider = (nanosec + devpriv->clock_ns - 1) / devpriv->clock_ns;
break;
}
return divider - 1;
}
static unsigned ni_timer_to_ns(const struct comedi_device *dev, int timer)
{
return devpriv->clock_ns * (timer + 1);
}
static unsigned ni_min_ai_scan_period_ns(struct comedi_device *dev,
unsigned num_channels)
{
switch (boardtype.reg_type) {
case ni_reg_611x:
case ni_reg_6143:
/* simultaneously-sampled inputs */
		return boardtype.ai_speed;
	default:
		/* multiplexed inputs */
		break;
	}
return boardtype.ai_speed * num_channels;
}
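/* Standard comedi cmdtest: returns 0 if the command is acceptable,
 * otherwise the number of the first validation step (1-4) that had to
 * adjust the command, with the offending fields already fixed up. */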
static int ni_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
int err = 0;
int tmp;
int sources;
/* step 1: make sure trigger sources are trivially valid */
	if (cmd->flags & CMDF_WRITE)
		cmd->flags &= ~CMDF_WRITE;
tmp = cmd->start_src;
cmd->start_src &= TRIG_NOW | TRIG_INT | TRIG_EXT;
if (!cmd->start_src || tmp != cmd->start_src)
err++;
tmp = cmd->scan_begin_src;
cmd->scan_begin_src &= TRIG_TIMER | TRIG_EXT;
if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
err++;
tmp = cmd->convert_src;
sources = TRIG_TIMER | TRIG_EXT;
if ((boardtype.reg_type == ni_reg_611x)
|| (boardtype.reg_type == ni_reg_6143))
sources |= TRIG_NOW;
cmd->convert_src &= sources;
if (!cmd->convert_src || tmp != cmd->convert_src)
err++;
tmp = cmd->scan_end_src;
cmd->scan_end_src &= TRIG_COUNT;
if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
err++;
tmp = cmd->stop_src;
cmd->stop_src &= TRIG_COUNT | TRIG_NONE;
if (!cmd->stop_src || tmp != cmd->stop_src)
err++;
if (err)
return 1;
/* step 2: make sure trigger sources are unique and mutually compatible */
/* note that mutual compatibility is not an issue here */
if (cmd->start_src != TRIG_NOW &&
cmd->start_src != TRIG_INT && cmd->start_src != TRIG_EXT)
err++;
if (cmd->scan_begin_src != TRIG_TIMER &&
cmd->scan_begin_src != TRIG_EXT &&
cmd->scan_begin_src != TRIG_OTHER)
err++;
if (cmd->convert_src != TRIG_TIMER &&
cmd->convert_src != TRIG_EXT && cmd->convert_src != TRIG_NOW)
err++;
if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE)
err++;
if (err)
return 2;
/* step 3: make sure arguments are trivially compatible */
if (cmd->start_src == TRIG_EXT) {
/* external trigger */
unsigned int tmp = CR_CHAN(cmd->start_arg);
if (tmp > 16)
tmp = 16;
tmp |= (cmd->start_arg & (CR_INVERT | CR_EDGE));
if (cmd->start_arg != tmp) {
cmd->start_arg = tmp;
err++;
}
} else {
if (cmd->start_arg != 0) {
/* true for both TRIG_NOW and TRIG_INT */
cmd->start_arg = 0;
err++;
}
}
	if (cmd->scan_begin_src == TRIG_TIMER) {
		if (cmd->scan_begin_arg <
		    ni_min_ai_scan_period_ns(dev, cmd->chanlist_len)) {
			cmd->scan_begin_arg =
				ni_min_ai_scan_period_ns(dev,
							 cmd->chanlist_len);
			err++;
		}
if (cmd->scan_begin_arg > devpriv->clock_ns * 0xffffff) {
cmd->scan_begin_arg = devpriv->clock_ns * 0xffffff;
err++;
}
} else if (cmd->scan_begin_src == TRIG_EXT) {
/* external trigger */
unsigned int tmp = CR_CHAN(cmd->scan_begin_arg);
if (tmp > 16)
tmp = 16;
tmp |= (cmd->scan_begin_arg & (CR_INVERT | CR_EDGE));
if (cmd->scan_begin_arg != tmp) {
cmd->scan_begin_arg = tmp;
err++;
}
} else { /* TRIG_OTHER */
if (cmd->scan_begin_arg) {
cmd->scan_begin_arg = 0;
err++;
}
}
if (cmd->convert_src == TRIG_TIMER) {
if ((boardtype.reg_type == ni_reg_611x)
|| (boardtype.reg_type == ni_reg_6143)) {
if (cmd->convert_arg != 0) {
cmd->convert_arg = 0;
err++;
}
} else {
if (cmd->convert_arg < boardtype.ai_speed) {
cmd->convert_arg = boardtype.ai_speed;
err++;
}
if (cmd->convert_arg > devpriv->clock_ns * 0xffff) {
cmd->convert_arg = devpriv->clock_ns * 0xffff;
err++;
}
}
} else if (cmd->convert_src == TRIG_EXT) {
/* external trigger */
unsigned int tmp = CR_CHAN(cmd->convert_arg);
if (tmp > 16)
tmp = 16;
tmp |= (cmd->convert_arg & (CR_ALT_FILTER | CR_INVERT));
if (cmd->convert_arg != tmp) {
cmd->convert_arg = tmp;
err++;
}
} else if (cmd->convert_src == TRIG_NOW) {
if (cmd->convert_arg != 0) {
cmd->convert_arg = 0;
err++;
}
}
if (cmd->scan_end_arg != cmd->chanlist_len) {
cmd->scan_end_arg = cmd->chanlist_len;
err++;
}
if (cmd->stop_src == TRIG_COUNT) {
unsigned int max_count = 0x01000000;
if (boardtype.reg_type == ni_reg_611x)
max_count -= num_adc_stages_611x;
if (cmd->stop_arg > max_count) {
cmd->stop_arg = max_count;
err++;
}
if (cmd->stop_arg < 1) {
cmd->stop_arg = 1;
err++;
}
} else {
/* TRIG_NONE */
if (cmd->stop_arg != 0) {
cmd->stop_arg = 0;
err++;
}
}
if (err)
return 3;
/* step 4: fix up any arguments */
if (cmd->scan_begin_src == TRIG_TIMER) {
tmp = cmd->scan_begin_arg;
		cmd->scan_begin_arg =
			ni_timer_to_ns(dev,
				       ni_ns_to_timer(dev, cmd->scan_begin_arg,
						      cmd->flags &
						      TRIG_ROUND_MASK));
if (tmp != cmd->scan_begin_arg)
err++;
}
if (cmd->convert_src == TRIG_TIMER) {
if ((boardtype.reg_type != ni_reg_611x)
&& (boardtype.reg_type != ni_reg_6143)) {
tmp = cmd->convert_arg;
			cmd->convert_arg =
				ni_timer_to_ns(dev,
					       ni_ns_to_timer(dev,
							      cmd->convert_arg,
							      cmd->flags &
							      TRIG_ROUND_MASK));
if (tmp != cmd->convert_arg)
err++;
if (cmd->scan_begin_src == TRIG_TIMER &&
cmd->scan_begin_arg <
cmd->convert_arg * cmd->scan_end_arg) {
cmd->scan_begin_arg =
cmd->convert_arg * cmd->scan_end_arg;
err++;
}
}
}
if (err)
return 4;
return 0;
}
static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
const struct comedi_cmd *cmd = &s->async->cmd;
int timer;
int mode1 = 0; /* mode1 is needed for both stop and convert */
int mode2 = 0;
int start_stop_select = 0;
unsigned int stop_count;
int interrupt_a_enable = 0;
MDPRINTK("ni_ai_cmd\n");
if (dev->irq == 0) {
comedi_error(dev, "cannot run command without an irq");
return -EIO;
}
ni_clear_ai_fifo(dev);
ni_load_channelgain_list(dev, cmd->chanlist_len, cmd->chanlist);
/* start configuration */
devpriv->stc_writew(dev, AI_Configuration_Start, Joint_Reset_Register);
/* disable analog triggering for now, since it
* interferes with the use of pfi0 */
devpriv->an_trig_etc_reg &= ~Analog_Trigger_Enable;
devpriv->stc_writew(dev, devpriv->an_trig_etc_reg,
Analog_Trigger_Etc_Register);
switch (cmd->start_src) {
case TRIG_INT:
case TRIG_NOW:
devpriv->stc_writew(dev, AI_START2_Select(0) |
AI_START1_Sync | AI_START1_Edge |
AI_START1_Select(0),
AI_Trigger_Select_Register);
break;
case TRIG_EXT:
{
int chan = CR_CHAN(cmd->start_arg);
unsigned int bits = AI_START2_Select(0) |
AI_START1_Sync | AI_START1_Select(chan + 1);
if (cmd->start_arg & CR_INVERT)
bits |= AI_START1_Polarity;
if (cmd->start_arg & CR_EDGE)
bits |= AI_START1_Edge;
devpriv->stc_writew(dev, bits,
AI_Trigger_Select_Register);
break;
}
}
mode2 &= ~AI_Pre_Trigger;
mode2 &= ~AI_SC_Initial_Load_Source;
mode2 &= ~AI_SC_Reload_Mode;
devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
if (cmd->chanlist_len == 1 || (boardtype.reg_type == ni_reg_611x)
|| (boardtype.reg_type == ni_reg_6143)) {
start_stop_select |= AI_STOP_Polarity;
start_stop_select |= AI_STOP_Select(31); /* logic low */
start_stop_select |= AI_STOP_Sync;
} else {
start_stop_select |= AI_STOP_Select(19); /* ai configuration memory */
}
devpriv->stc_writew(dev, start_stop_select,
AI_START_STOP_Select_Register);
devpriv->ai_cmd2 = 0;
switch (cmd->stop_src) {
case TRIG_COUNT:
stop_count = cmd->stop_arg - 1;
if (boardtype.reg_type == ni_reg_611x) {
/* have to take 3 stage adc pipeline into account */
stop_count += num_adc_stages_611x;
}
/* stage number of scans */
devpriv->stc_writel(dev, stop_count, AI_SC_Load_A_Registers);
mode1 |= AI_Start_Stop | AI_Mode_1_Reserved | AI_Trigger_Once;
devpriv->stc_writew(dev, mode1, AI_Mode_1_Register);
/* load SC (Scan Count) */
devpriv->stc_writew(dev, AI_SC_Load, AI_Command_1_Register);
devpriv->ai_continuous = 0;
if (stop_count == 0) {
devpriv->ai_cmd2 |= AI_End_On_End_Of_Scan;
interrupt_a_enable |= AI_STOP_Interrupt_Enable;
/* this is required to get the last sample for chanlist_len > 1, not sure why */
if (cmd->chanlist_len > 1)
start_stop_select |=
AI_STOP_Polarity | AI_STOP_Edge;
}
break;
case TRIG_NONE:
/* stage number of scans */
devpriv->stc_writel(dev, 0, AI_SC_Load_A_Registers);
mode1 |= AI_Start_Stop | AI_Mode_1_Reserved | AI_Continuous;
devpriv->stc_writew(dev, mode1, AI_Mode_1_Register);
/* load SC (Scan Count) */
devpriv->stc_writew(dev, AI_SC_Load, AI_Command_1_Register);
devpriv->ai_continuous = 1;
break;
}
switch (cmd->scan_begin_src) {
case TRIG_TIMER:
/*
stop bits for non 611x boards
AI_SI_Special_Trigger_Delay=0
AI_Pre_Trigger=0
AI_START_STOP_Select_Register:
AI_START_Polarity=0 (?) rising edge
AI_START_Edge=1 edge triggered
AI_START_Sync=1 (?)
AI_START_Select=0 SI_TC
AI_STOP_Polarity=0 rising edge
AI_STOP_Edge=0 level
AI_STOP_Sync=1
AI_STOP_Select=19 external pin (configuration mem)
*/
start_stop_select |= AI_START_Edge | AI_START_Sync;
devpriv->stc_writew(dev, start_stop_select,
AI_START_STOP_Select_Register);
mode2 |= AI_SI_Reload_Mode(0);
/* AI_SI_Initial_Load_Source=A */
mode2 &= ~AI_SI_Initial_Load_Source;
/* mode2 |= AI_SC_Reload_Mode; */
devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
/* load SI */
timer = ni_ns_to_timer(dev, cmd->scan_begin_arg,
TRIG_ROUND_NEAREST);
devpriv->stc_writel(dev, timer, AI_SI_Load_A_Registers);
devpriv->stc_writew(dev, AI_SI_Load, AI_Command_1_Register);
break;
case TRIG_EXT:
if (cmd->scan_begin_arg & CR_EDGE)
start_stop_select |= AI_START_Edge;
/* AI_START_Polarity==1 is falling edge */
if (cmd->scan_begin_arg & CR_INVERT)
start_stop_select |= AI_START_Polarity;
if (cmd->scan_begin_src != cmd->convert_src ||
(cmd->scan_begin_arg & ~CR_EDGE) !=
(cmd->convert_arg & ~CR_EDGE))
start_stop_select |= AI_START_Sync;
start_stop_select |=
AI_START_Select(1 + CR_CHAN(cmd->scan_begin_arg));
devpriv->stc_writew(dev, start_stop_select,
AI_START_STOP_Select_Register);
break;
}
switch (cmd->convert_src) {
case TRIG_TIMER:
case TRIG_NOW:
if (cmd->convert_arg == 0 || cmd->convert_src == TRIG_NOW)
timer = 1;
else
timer = ni_ns_to_timer(dev, cmd->convert_arg,
TRIG_ROUND_NEAREST);
devpriv->stc_writew(dev, 1, AI_SI2_Load_A_Register); /* 0,0 does not work. */
devpriv->stc_writew(dev, timer, AI_SI2_Load_B_Register);
/* AI_SI2_Reload_Mode = alternate */
/* AI_SI2_Initial_Load_Source = A */
mode2 &= ~AI_SI2_Initial_Load_Source;
mode2 |= AI_SI2_Reload_Mode;
devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
/* AI_SI2_Load */
devpriv->stc_writew(dev, AI_SI2_Load, AI_Command_1_Register);
mode2 |= AI_SI2_Reload_Mode; /* alternate */
mode2 |= AI_SI2_Initial_Load_Source; /* B */
devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
break;
case TRIG_EXT:
mode1 |= AI_CONVERT_Source_Select(1 + cmd->convert_arg);
if ((cmd->convert_arg & CR_INVERT) == 0)
mode1 |= AI_CONVERT_Source_Polarity;
devpriv->stc_writew(dev, mode1, AI_Mode_1_Register);
mode2 |= AI_Start_Stop_Gate_Enable | AI_SC_Gate_Enable;
devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
break;
}
if (dev->irq) {
/* interrupt on FIFO, errors, SC_TC */
interrupt_a_enable |= AI_Error_Interrupt_Enable |
AI_SC_TC_Interrupt_Enable;
#ifndef PCIDMA
interrupt_a_enable |= AI_FIFO_Interrupt_Enable;
#endif
if (cmd->flags & TRIG_WAKE_EOS
|| (devpriv->ai_cmd2 & AI_End_On_End_Of_Scan)) {
/* wake on end-of-scan */
devpriv->aimode = AIMODE_SCAN;
} else {
devpriv->aimode = AIMODE_HALF_FULL;
}
switch (devpriv->aimode) {
case AIMODE_HALF_FULL:
/*generate FIFO interrupts and DMA requests on half-full */
#ifdef PCIDMA
devpriv->stc_writew(dev, AI_FIFO_Mode_HF_to_E,
AI_Mode_3_Register);
#else
devpriv->stc_writew(dev, AI_FIFO_Mode_HF,
AI_Mode_3_Register);
#endif
break;
case AIMODE_SAMPLE:
/*generate FIFO interrupts on non-empty */
devpriv->stc_writew(dev, AI_FIFO_Mode_NE,
AI_Mode_3_Register);
break;
case AIMODE_SCAN:
#ifdef PCIDMA
devpriv->stc_writew(dev, AI_FIFO_Mode_NE,
AI_Mode_3_Register);
#else
devpriv->stc_writew(dev, AI_FIFO_Mode_HF,
AI_Mode_3_Register);
#endif
interrupt_a_enable |= AI_STOP_Interrupt_Enable;
break;
default:
break;
}
		/* clear interrupts */
		devpriv->stc_writew(dev, AI_Error_Interrupt_Ack |
				    AI_STOP_Interrupt_Ack |
				    AI_START_Interrupt_Ack |
				    AI_START2_Interrupt_Ack |
				    AI_START1_Interrupt_Ack |
				    AI_SC_TC_Interrupt_Ack |
				    AI_SC_TC_Error_Confirm,
				    Interrupt_A_Ack_Register);
ni_set_bits(dev, Interrupt_A_Enable_Register,
interrupt_a_enable, 1);
MDPRINTK("Interrupt_A_Enable_Register = 0x%04x\n",
devpriv->int_a_enable_reg);
} else {
/* interrupt on nothing */
ni_set_bits(dev, Interrupt_A_Enable_Register, ~0, 0);
/* XXX start polling if necessary */
MDPRINTK("interrupting on nothing\n");
}
/* end configuration */
devpriv->stc_writew(dev, AI_Configuration_End, Joint_Reset_Register);
switch (cmd->scan_begin_src) {
case TRIG_TIMER:
devpriv->stc_writew(dev,
AI_SI2_Arm | AI_SI_Arm | AI_DIV_Arm |
AI_SC_Arm, AI_Command_1_Register);
break;
case TRIG_EXT:
/* XXX AI_SI_Arm? */
devpriv->stc_writew(dev,
AI_SI2_Arm | AI_SI_Arm | AI_DIV_Arm |
AI_SC_Arm, AI_Command_1_Register);
break;
}
#ifdef PCIDMA
{
int retval = ni_ai_setup_MITE_dma(dev);
if (retval)
return retval;
}
/* mite_dump_regs(devpriv->mite); */
#endif
switch (cmd->start_src) {
case TRIG_NOW:
/* AI_START1_Pulse */
devpriv->stc_writew(dev, AI_START1_Pulse | devpriv->ai_cmd2,
AI_Command_2_Register);
s->async->inttrig = NULL;
break;
case TRIG_EXT:
s->async->inttrig = NULL;
break;
case TRIG_INT:
s->async->inttrig = &ni_ai_inttrig;
break;
}
MDPRINTK("exit ni_ai_cmd\n");
return 0;
}
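/* comedi inttrig hook installed by ni_ai_cmd() for TRIG_INT start
 * sources: it fires the software START1 pulse and then removes itself,
 * so each command can only be internally triggered once. */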
static int ni_ai_inttrig(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int trignum)
{
if (trignum != 0)
return -EINVAL;
devpriv->stc_writew(dev, AI_START1_Pulse | devpriv->ai_cmd2,
AI_Command_2_Register);
s->async->inttrig = NULL;
return 1;
}
static int ni_ai_config_analog_trig(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data);
static int ni_ai_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
if (insn->n < 1)
return -EINVAL;
switch (data[0]) {
case INSN_CONFIG_ANALOG_TRIG:
return ni_ai_config_analog_trig(dev, s, insn, data);
case INSN_CONFIG_ALT_SOURCE:
if (boardtype.reg_type & ni_reg_m_series_mask) {
if (data[1] & ~(MSeries_AI_Bypass_Cal_Sel_Pos_Mask |
MSeries_AI_Bypass_Cal_Sel_Neg_Mask |
MSeries_AI_Bypass_Mode_Mux_Mask |
MSeries_AO_Bypass_AO_Cal_Sel_Mask)) {
return -EINVAL;
}
devpriv->ai_calib_source = data[1];
} else if (boardtype.reg_type == ni_reg_6143) {
unsigned int calib_source;
calib_source = data[1] & 0xf;
if (calib_source > 0xF)
return -EINVAL;
devpriv->ai_calib_source = calib_source;
ni_writew(calib_source, Calibration_Channel_6143);
} else {
unsigned int calib_source;
unsigned int calib_source_adjust;
calib_source = data[1] & 0xf;
calib_source_adjust = (data[1] >> 4) & 0xff;
if (calib_source >= 8)
return -EINVAL;
devpriv->ai_calib_source = calib_source;
if (boardtype.reg_type == ni_reg_611x) {
ni_writeb(calib_source_adjust,
Cal_Gain_Select_611x);
}
}
return 2;
default:
break;
}
return -EINVAL;
}
static int ni_ai_config_analog_trig(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int a, b, modebits;
int err = 0;
/* data[1] is flags
* data[2] is analog line
* data[3] is set level
* data[4] is reset level */
if (!boardtype.has_analog_trig)
return -EINVAL;
if ((data[1] & 0xffff0000) != COMEDI_EV_SCAN_BEGIN) {
data[1] &= (COMEDI_EV_SCAN_BEGIN | 0xffff);
err++;
}
if (data[2] >= boardtype.n_adchan) {
data[2] = boardtype.n_adchan - 1;
err++;
}
if (data[3] > 255) { /* a */
data[3] = 255;
err++;
}
if (data[4] > 255) { /* b */
data[4] = 255;
err++;
}
/*
* 00 ignore
* 01 set
* 10 reset
*
* modes:
* 1 level: +b- +a-
* high mode 00 00 01 10
* low mode 00 00 10 01
* 2 level: (a<b)
* hysteresis low mode 10 00 00 01
* hysteresis high mode 01 00 00 10
* middle mode 10 01 01 10
*/
a = data[3];
b = data[4];
modebits = data[1] & 0xff;
if (modebits & 0xf0) {
/* two level mode */
if (b < a) {
/* swap order */
a = data[4];
b = data[3];
modebits =
((data[1] & 0xf) << 4) | ((data[1] & 0xf0) >> 4);
}
devpriv->atrig_low = a;
devpriv->atrig_high = b;
switch (modebits) {
case 0x81: /* low hysteresis mode */
devpriv->atrig_mode = 6;
break;
case 0x42: /* high hysteresis mode */
devpriv->atrig_mode = 3;
break;
case 0x96: /* middle window mode */
devpriv->atrig_mode = 2;
break;
default:
data[1] &= ~0xff;
err++;
}
} else {
/* one level mode */
if (b != 0) {
data[4] = 0;
err++;
}
switch (modebits) {
case 0x06: /* high window mode */
devpriv->atrig_high = a;
devpriv->atrig_mode = 0;
break;
case 0x09: /* low window mode */
devpriv->atrig_low = a;
devpriv->atrig_mode = 1;
break;
default:
data[1] &= ~0xff;
err++;
}
}
if (err)
return -EAGAIN;
return 5;
}
/* munge data from unsigned to 2's complement for analog output bipolar modes */
static void ni_ao_munge(struct comedi_device *dev, struct comedi_subdevice *s,
void *data, unsigned int num_bytes,
unsigned int chan_index)
{
struct comedi_async *async = s->async;
unsigned int range;
unsigned int i;
unsigned int offset;
unsigned int length = num_bytes / sizeof(short);
short *array = data;
offset = 1 << (boardtype.aobits - 1);
for (i = 0; i < length; i++) {
range = CR_RANGE(async->cmd.chanlist[chan_index]);
if (boardtype.ao_unipolar == 0 || (range & 1) == 0)
array[i] -= offset;
#ifdef PCIDMA
array[i] = cpu_to_le16(array[i]);
#endif
chan_index++;
chan_index %= async->cmd.chanlist_len;
}
}
static int ni_m_series_ao_config_chanlist(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int chanspec[],
unsigned int n_chans, int timed)
{
unsigned int range;
unsigned int chan;
unsigned int conf;
int i;
int invert = 0;
if (timed) {
for (i = 0; i < boardtype.n_aochan; ++i) {
devpriv->ao_conf[i] &= ~MSeries_AO_Update_Timed_Bit;
ni_writeb(devpriv->ao_conf[i],
M_Offset_AO_Config_Bank(i));
ni_writeb(0xf, M_Offset_AO_Waveform_Order(i));
}
}
for (i = 0; i < n_chans; i++) {
const struct comedi_krange *krange;
chan = CR_CHAN(chanspec[i]);
range = CR_RANGE(chanspec[i]);
krange = s->range_table->range + range;
invert = 0;
conf = 0;
switch (krange->max - krange->min) {
case 20000000:
conf |= MSeries_AO_DAC_Reference_10V_Internal_Bits;
ni_writeb(0, M_Offset_AO_Reference_Attenuation(chan));
break;
case 10000000:
conf |= MSeries_AO_DAC_Reference_5V_Internal_Bits;
ni_writeb(0, M_Offset_AO_Reference_Attenuation(chan));
break;
case 4000000:
conf |= MSeries_AO_DAC_Reference_10V_Internal_Bits;
ni_writeb(MSeries_Attenuate_x5_Bit,
M_Offset_AO_Reference_Attenuation(chan));
break;
case 2000000:
conf |= MSeries_AO_DAC_Reference_5V_Internal_Bits;
ni_writeb(MSeries_Attenuate_x5_Bit,
M_Offset_AO_Reference_Attenuation(chan));
break;
default:
printk("%s: bug! unhandled ao reference voltage\n",
__func__);
break;
}
switch (krange->max + krange->min) {
case 0:
conf |= MSeries_AO_DAC_Offset_0V_Bits;
break;
case 10000000:
conf |= MSeries_AO_DAC_Offset_5V_Bits;
break;
default:
printk("%s: bug! unhandled ao offset voltage\n",
__func__);
break;
}
if (timed)
conf |= MSeries_AO_Update_Timed_Bit;
ni_writeb(conf, M_Offset_AO_Config_Bank(chan));
devpriv->ao_conf[chan] = conf;
ni_writeb(i, M_Offset_AO_Waveform_Order(chan));
}
return invert;
}
static int ni_old_ao_config_chanlist(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int chanspec[],
unsigned int n_chans)
{
unsigned int range;
unsigned int chan;
unsigned int conf;
int i;
int invert = 0;
for (i = 0; i < n_chans; i++) {
chan = CR_CHAN(chanspec[i]);
range = CR_RANGE(chanspec[i]);
conf = AO_Channel(chan);
if (boardtype.ao_unipolar) {
if ((range & 1) == 0) {
conf |= AO_Bipolar;
invert = (1 << (boardtype.aobits - 1));
} else {
invert = 0;
}
if (range & 2)
conf |= AO_Ext_Ref;
} else {
conf |= AO_Bipolar;
invert = (1 << (boardtype.aobits - 1));
}
/* not all boards can deglitch, but this shouldn't hurt */
if (chanspec[i] & CR_DEGLITCH)
conf |= AO_Deglitch;
/* analog reference */
		/* AREF_OTHER connects AO ground to AI ground, I think */
conf |= (CR_AREF(chanspec[i]) ==
AREF_OTHER) ? AO_Ground_Ref : 0;
ni_writew(conf, AO_Configuration);
devpriv->ao_conf[chan] = conf;
}
return invert;
}
static int ni_ao_config_chanlist(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int chanspec[], unsigned int n_chans,
int timed)
{
if (boardtype.reg_type & ni_reg_m_series_mask)
return ni_m_series_ao_config_chanlist(dev, s, chanspec, n_chans,
timed);
else
return ni_old_ao_config_chanlist(dev, s, chanspec, n_chans);
}
static int ni_ao_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data)
{
data[0] = devpriv->ao[CR_CHAN(insn->chanspec)];
return 1;
}
static int ni_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int invert;
invert = ni_ao_config_chanlist(dev, s, &insn->chanspec, 1, 0);
devpriv->ao[chan] = data[0];
if (boardtype.reg_type & ni_reg_m_series_mask) {
ni_writew(data[0], M_Offset_DAC_Direct_Data(chan));
} else
ni_writew(data[0] ^ invert,
(chan) ? DAC1_Direct_Data : DAC0_Direct_Data);
return 1;
}
static int ni_ao_insn_write_671x(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int invert;
ao_win_out(1 << chan, AO_Immediate_671x);
invert = 1 << (boardtype.aobits - 1);
ni_ao_config_chanlist(dev, s, &insn->chanspec, 1, 0);
devpriv->ao[chan] = data[0];
ao_win_out(data[0] ^ invert, DACx_Direct_Data_671x(chan));
return 1;
}
static int ni_ao_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
switch (data[0]) {
case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE:
switch (data[1]) {
case COMEDI_OUTPUT:
data[2] = 1 + boardtype.ao_fifo_depth * sizeof(short);
if (devpriv->mite)
data[2] += devpriv->mite->fifo_size;
break;
case COMEDI_INPUT:
data[2] = 0;
break;
default:
			return -EINVAL;
}
return 0;
default:
break;
}
return -EINVAL;
}
static int ni_ao_inttrig(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int trignum)
{
int ret;
int interrupt_b_bits;
int i;
static const int timeout = 1000;
if (trignum != 0)
return -EINVAL;
	/* A null inttrig at the beginning prevents the AO start trigger from
	 * executing more than once per command (and from doing things like
	 * trying to allocate the AO DMA channel multiple times). */
s->async->inttrig = NULL;
ni_set_bits(dev, Interrupt_B_Enable_Register,
AO_FIFO_Interrupt_Enable | AO_Error_Interrupt_Enable, 0);
interrupt_b_bits = AO_Error_Interrupt_Enable;
#ifdef PCIDMA
devpriv->stc_writew(dev, 1, DAC_FIFO_Clear);
if (boardtype.reg_type & ni_reg_6xxx_mask)
ni_ao_win_outl(dev, 0x6, AO_FIFO_Offset_Load_611x);
ret = ni_ao_setup_MITE_dma(dev);
if (ret)
return ret;
ret = ni_ao_wait_for_dma_load(dev);
if (ret < 0)
return ret;
#else
ret = ni_ao_prep_fifo(dev, s);
if (ret == 0)
return -EPIPE;
interrupt_b_bits |= AO_FIFO_Interrupt_Enable;
#endif
devpriv->stc_writew(dev, devpriv->ao_mode3 | AO_Not_An_UPDATE,
AO_Mode_3_Register);
devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register);
/* wait for DACs to be loaded */
for (i = 0; i < timeout; i++) {
udelay(1);
if ((devpriv->stc_readw(dev,
Joint_Status_2_Register) &
AO_TMRDACWRs_In_Progress_St) == 0)
break;
}
if (i == timeout) {
comedi_error(dev,
"timed out waiting for AO_TMRDACWRs_In_Progress_St to clear");
return -EIO;
}
	/* the STC manual says we need to clear the error interrupt after
	 * AO_TMRDACWRs_In_Progress_St clears */
devpriv->stc_writew(dev, AO_Error_Interrupt_Ack,
Interrupt_B_Ack_Register);
ni_set_bits(dev, Interrupt_B_Enable_Register, interrupt_b_bits, 1);
devpriv->stc_writew(dev,
devpriv->ao_cmd1 | AO_UI_Arm | AO_UC_Arm | AO_BC_Arm
| AO_DAC1_Update_Mode | AO_DAC0_Update_Mode,
AO_Command_1_Register);
devpriv->stc_writew(dev, devpriv->ao_cmd2 | AO_START1_Pulse,
AO_Command_2_Register);
return 0;
}
static int ni_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
const struct comedi_cmd *cmd = &s->async->cmd;
int bits;
int i;
unsigned trigvar;
if (dev->irq == 0) {
comedi_error(dev, "cannot run command without an irq");
return -EIO;
}
devpriv->stc_writew(dev, AO_Configuration_Start, Joint_Reset_Register);
devpriv->stc_writew(dev, AO_Disarm, AO_Command_1_Register);
if (boardtype.reg_type & ni_reg_6xxx_mask) {
ao_win_out(CLEAR_WG, AO_Misc_611x);
bits = 0;
for (i = 0; i < cmd->chanlist_len; i++) {
int chan;
chan = CR_CHAN(cmd->chanlist[i]);
bits |= 1 << chan;
ao_win_out(chan, AO_Waveform_Generation_611x);
}
ao_win_out(bits, AO_Timed_611x);
}
ni_ao_config_chanlist(dev, s, cmd->chanlist, cmd->chanlist_len, 1);
if (cmd->stop_src == TRIG_NONE) {
devpriv->ao_mode1 |= AO_Continuous;
devpriv->ao_mode1 &= ~AO_Trigger_Once;
} else {
devpriv->ao_mode1 &= ~AO_Continuous;
devpriv->ao_mode1 |= AO_Trigger_Once;
}
devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
switch (cmd->start_src) {
case TRIG_INT:
case TRIG_NOW:
devpriv->ao_trigger_select &=
~(AO_START1_Polarity | AO_START1_Select(-1));
devpriv->ao_trigger_select |= AO_START1_Edge | AO_START1_Sync;
devpriv->stc_writew(dev, devpriv->ao_trigger_select,
AO_Trigger_Select_Register);
break;
case TRIG_EXT:
devpriv->ao_trigger_select =
AO_START1_Select(CR_CHAN(cmd->start_arg) + 1);
if (cmd->start_arg & CR_INVERT)
devpriv->ao_trigger_select |= AO_START1_Polarity; /* 0=active high, 1=active low. see daq-stc 3-24 (p186) */
if (cmd->start_arg & CR_EDGE)
devpriv->ao_trigger_select |= AO_START1_Edge; /* 0=edge detection disabled, 1=enabled */
devpriv->stc_writew(dev, devpriv->ao_trigger_select,
AO_Trigger_Select_Register);
break;
default:
BUG();
break;
}
devpriv->ao_mode3 &= ~AO_Trigger_Length;
devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register);
devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
devpriv->ao_mode2 &= ~AO_BC_Initial_Load_Source;
devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
if (cmd->stop_src == TRIG_NONE) {
devpriv->stc_writel(dev, 0xffffff, AO_BC_Load_A_Register);
} else {
devpriv->stc_writel(dev, 0, AO_BC_Load_A_Register);
}
devpriv->stc_writew(dev, AO_BC_Load, AO_Command_1_Register);
devpriv->ao_mode2 &= ~AO_UC_Initial_Load_Source;
devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
switch (cmd->stop_src) {
case TRIG_COUNT:
if (boardtype.reg_type & ni_reg_m_series_mask) {
/* this is how the NI example code does it for m-series boards, verified correct with 6259 */
devpriv->stc_writel(dev, cmd->stop_arg - 1,
AO_UC_Load_A_Register);
devpriv->stc_writew(dev, AO_UC_Load,
AO_Command_1_Register);
} else {
devpriv->stc_writel(dev, cmd->stop_arg,
AO_UC_Load_A_Register);
devpriv->stc_writew(dev, AO_UC_Load,
AO_Command_1_Register);
devpriv->stc_writel(dev, cmd->stop_arg - 1,
AO_UC_Load_A_Register);
}
break;
case TRIG_NONE:
devpriv->stc_writel(dev, 0xffffff, AO_UC_Load_A_Register);
devpriv->stc_writew(dev, AO_UC_Load, AO_Command_1_Register);
devpriv->stc_writel(dev, 0xffffff, AO_UC_Load_A_Register);
break;
default:
devpriv->stc_writel(dev, 0, AO_UC_Load_A_Register);
devpriv->stc_writew(dev, AO_UC_Load, AO_Command_1_Register);
devpriv->stc_writel(dev, cmd->stop_arg, AO_UC_Load_A_Register);
}
devpriv->ao_mode1 &=
~(AO_UI_Source_Select(0x1f) | AO_UI_Source_Polarity |
AO_UPDATE_Source_Select(0x1f) | AO_UPDATE_Source_Polarity);
switch (cmd->scan_begin_src) {
case TRIG_TIMER:
devpriv->ao_cmd2 &= ~AO_BC_Gate_Enable;
trigvar =
ni_ns_to_timer(dev, cmd->scan_begin_arg,
TRIG_ROUND_NEAREST);
devpriv->stc_writel(dev, 1, AO_UI_Load_A_Register);
devpriv->stc_writew(dev, AO_UI_Load, AO_Command_1_Register);
devpriv->stc_writel(dev, trigvar, AO_UI_Load_A_Register);
break;
case TRIG_EXT:
devpriv->ao_mode1 |=
AO_UPDATE_Source_Select(cmd->scan_begin_arg);
if (cmd->scan_begin_arg & CR_INVERT)
devpriv->ao_mode1 |= AO_UPDATE_Source_Polarity;
devpriv->ao_cmd2 |= AO_BC_Gate_Enable;
break;
default:
BUG();
break;
}
devpriv->stc_writew(dev, devpriv->ao_cmd2, AO_Command_2_Register);
devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
devpriv->ao_mode2 &=
~(AO_UI_Reload_Mode(3) | AO_UI_Initial_Load_Source);
devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
if (cmd->scan_end_arg > 1) {
devpriv->ao_mode1 |= AO_Multiple_Channels;
devpriv->stc_writew(dev,
AO_Number_Of_Channels(cmd->scan_end_arg -
1) |
AO_UPDATE_Output_Select
(AO_Update_Output_High_Z),
AO_Output_Control_Register);
} else {
unsigned bits;
devpriv->ao_mode1 &= ~AO_Multiple_Channels;
bits = AO_UPDATE_Output_Select(AO_Update_Output_High_Z);
		if (boardtype.reg_type &
		    (ni_reg_m_series_mask | ni_reg_6xxx_mask)) {
bits |= AO_Number_Of_Channels(0);
} else {
bits |=
AO_Number_Of_Channels(CR_CHAN(cmd->chanlist[0]));
}
devpriv->stc_writew(dev, bits, AO_Output_Control_Register);
}
devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
devpriv->stc_writew(dev, AO_DAC0_Update_Mode | AO_DAC1_Update_Mode,
AO_Command_1_Register);
devpriv->ao_mode3 |= AO_Stop_On_Overrun_Error;
devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register);
devpriv->ao_mode2 &= ~AO_FIFO_Mode_Mask;
#ifdef PCIDMA
devpriv->ao_mode2 |= AO_FIFO_Mode_HF_to_F;
#else
devpriv->ao_mode2 |= AO_FIFO_Mode_HF;
#endif
devpriv->ao_mode2 &= ~AO_FIFO_Retransmit_Enable;
devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
bits = AO_BC_Source_Select | AO_UPDATE_Pulse_Width |
AO_TMRDACWR_Pulse_Width;
if (boardtype.ao_fifo_depth)
bits |= AO_FIFO_Enable;
else
bits |= AO_DMA_PIO_Control;
#if 0
/* F Hess: windows driver does not set AO_Number_Of_DAC_Packages bit for 6281,
verified with bus analyzer. */
if (boardtype.reg_type & ni_reg_m_series_mask)
bits |= AO_Number_Of_DAC_Packages;
#endif
devpriv->stc_writew(dev, bits, AO_Personal_Register);
/* enable sending of ao dma requests */
devpriv->stc_writew(dev, AO_AOFREQ_Enable, AO_Start_Select_Register);
devpriv->stc_writew(dev, AO_Configuration_End, Joint_Reset_Register);
if (cmd->stop_src == TRIG_COUNT) {
devpriv->stc_writew(dev, AO_BC_TC_Interrupt_Ack,
Interrupt_B_Ack_Register);
ni_set_bits(dev, Interrupt_B_Enable_Register,
AO_BC_TC_Interrupt_Enable, 1);
}
s->async->inttrig = &ni_ao_inttrig;
return 0;
}
static int ni_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
int err = 0;
int tmp;
/* step 1: make sure trigger sources are trivially valid */
if ((cmd->flags & CMDF_WRITE) == 0) {
cmd->flags |= CMDF_WRITE;
}
tmp = cmd->start_src;
cmd->start_src &= TRIG_INT | TRIG_EXT;
if (!cmd->start_src || tmp != cmd->start_src)
err++;
tmp = cmd->scan_begin_src;
cmd->scan_begin_src &= TRIG_TIMER | TRIG_EXT;
if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
err++;
tmp = cmd->convert_src;
cmd->convert_src &= TRIG_NOW;
if (!cmd->convert_src || tmp != cmd->convert_src)
err++;
tmp = cmd->scan_end_src;
cmd->scan_end_src &= TRIG_COUNT;
if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
err++;
tmp = cmd->stop_src;
cmd->stop_src &= TRIG_COUNT | TRIG_NONE;
if (!cmd->stop_src || tmp != cmd->stop_src)
err++;
if (err)
return 1;
/* step 2: make sure trigger sources are unique and mutually compatible */
if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE)
err++;
if (err)
return 2;
/* step 3: make sure arguments are trivially compatible */
if (cmd->start_src == TRIG_EXT) {
/* external trigger */
unsigned int tmp = CR_CHAN(cmd->start_arg);
if (tmp > 18)
tmp = 18;
tmp |= (cmd->start_arg & (CR_INVERT | CR_EDGE));
if (cmd->start_arg != tmp) {
cmd->start_arg = tmp;
err++;
}
} else {
if (cmd->start_arg != 0) {
/* true for both TRIG_NOW and TRIG_INT */
cmd->start_arg = 0;
err++;
}
}
if (cmd->scan_begin_src == TRIG_TIMER) {
if (cmd->scan_begin_arg < boardtype.ao_speed) {
cmd->scan_begin_arg = boardtype.ao_speed;
err++;
}
if (cmd->scan_begin_arg > devpriv->clock_ns * 0xffffff) { /* XXX check */
cmd->scan_begin_arg = devpriv->clock_ns * 0xffffff;
err++;
}
}
if (cmd->convert_arg != 0) {
cmd->convert_arg = 0;
err++;
}
if (cmd->scan_end_arg != cmd->chanlist_len) {
cmd->scan_end_arg = cmd->chanlist_len;
err++;
}
if (cmd->stop_src == TRIG_COUNT) { /* XXX check */
if (cmd->stop_arg > 0x00ffffff) {
cmd->stop_arg = 0x00ffffff;
err++;
}
} else {
/* TRIG_NONE */
if (cmd->stop_arg != 0) {
cmd->stop_arg = 0;
err++;
}
}
if (err)
return 3;
/* step 4: fix up any arguments */
if (cmd->scan_begin_src == TRIG_TIMER) {
tmp = cmd->scan_begin_arg;
		cmd->scan_begin_arg =
			ni_timer_to_ns(dev,
				       ni_ns_to_timer(dev, cmd->scan_begin_arg,
						      cmd->flags &
						      TRIG_ROUND_MASK));
if (tmp != cmd->scan_begin_arg)
err++;
}
if (err)
return 4;
/* step 5: fix up chanlist */
if (err)
return 5;
return 0;
}
static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s)
{
/* devpriv->ao0p=0x0000; */
/* ni_writew(devpriv->ao0p,AO_Configuration); */
/* devpriv->ao1p=AO_Channel(1); */
/* ni_writew(devpriv->ao1p,AO_Configuration); */
ni_release_ao_mite_channel(dev);
devpriv->stc_writew(dev, AO_Configuration_Start, Joint_Reset_Register);
devpriv->stc_writew(dev, AO_Disarm, AO_Command_1_Register);
ni_set_bits(dev, Interrupt_B_Enable_Register, ~0, 0);
devpriv->stc_writew(dev, AO_BC_Source_Select, AO_Personal_Register);
devpriv->stc_writew(dev, 0x3f98, Interrupt_B_Ack_Register);
devpriv->stc_writew(dev, AO_BC_Source_Select | AO_UPDATE_Pulse_Width |
AO_TMRDACWR_Pulse_Width, AO_Personal_Register);
devpriv->stc_writew(dev, 0, AO_Output_Control_Register);
devpriv->stc_writew(dev, 0, AO_Start_Select_Register);
devpriv->ao_cmd1 = 0;
devpriv->stc_writew(dev, devpriv->ao_cmd1, AO_Command_1_Register);
devpriv->ao_cmd2 = 0;
devpriv->stc_writew(dev, devpriv->ao_cmd2, AO_Command_2_Register);
devpriv->ao_mode1 = 0;
devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
devpriv->ao_mode2 = 0;
devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
if (boardtype.reg_type & ni_reg_m_series_mask)
devpriv->ao_mode3 = AO_Last_Gate_Disable;
else
devpriv->ao_mode3 = 0;
devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register);
devpriv->ao_trigger_select = 0;
devpriv->stc_writew(dev, devpriv->ao_trigger_select,
AO_Trigger_Select_Register);
if (boardtype.reg_type & ni_reg_6xxx_mask) {
unsigned immediate_bits = 0;
unsigned i;
for (i = 0; i < s->n_chan; ++i) {
immediate_bits |= 1 << i;
}
ao_win_out(immediate_bits, AO_Immediate_671x);
ao_win_out(CLEAR_WG, AO_Misc_611x);
}
devpriv->stc_writew(dev, AO_Configuration_End, Joint_Reset_Register);
return 0;
}
/* digital io */
static int ni_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
#ifdef DEBUG_DIO
printk("ni_dio_insn_config() chan=%d io=%d\n",
CR_CHAN(insn->chanspec), data[0]);
#endif
switch (data[0]) {
case INSN_CONFIG_DIO_OUTPUT:
s->io_bits |= 1 << CR_CHAN(insn->chanspec);
break;
case INSN_CONFIG_DIO_INPUT:
s->io_bits &= ~(1 << CR_CHAN(insn->chanspec));
break;
case INSN_CONFIG_DIO_QUERY:
data[1] = (s->io_bits & (1 << CR_CHAN(insn->chanspec))) ?
COMEDI_OUTPUT : COMEDI_INPUT;
return insn->n;
default:
return -EINVAL;
}
devpriv->dio_control &= ~DIO_Pins_Dir_Mask;
devpriv->dio_control |= DIO_Pins_Dir(s->io_bits);
devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register);
return 1;
}
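/*
* For the insn_bits handlers below, data[0] is a mask of channels to
* update and data[1] holds the new bit values; channels not covered by
* the mask are left alone, and the current input state is passed back in
* data[1]. The handlers return 2, the number of data words consumed.
*/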
static int ni_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
#ifdef DEBUG_DIO
printk("ni_dio_insn_bits() mask=0x%x bits=0x%x\n", data[0], data[1]);
#endif
if (insn->n != 2)
return -EINVAL;
if (data[0]) {
/* Perform check to make sure we're not using the
serial part of the dio */
if ((data[0] & (DIO_SDIN | DIO_SDOUT))
&& devpriv->serial_interval_ns)
return -EBUSY;
s->state &= ~data[0];
s->state |= (data[0] & data[1]);
devpriv->dio_output &= ~DIO_Parallel_Data_Mask;
devpriv->dio_output |= DIO_Parallel_Data_Out(s->state);
devpriv->stc_writew(dev, devpriv->dio_output,
DIO_Output_Register);
}
data[1] = devpriv->stc_readw(dev, DIO_Parallel_Input_Register);
return 2;
}
static int ni_m_series_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
#ifdef DEBUG_DIO
printk("ni_m_series_dio_insn_config() chan=%d io=%d\n",
CR_CHAN(insn->chanspec), data[0]);
#endif
switch (data[0]) {
case INSN_CONFIG_DIO_OUTPUT:
s->io_bits |= 1 << CR_CHAN(insn->chanspec);
break;
case INSN_CONFIG_DIO_INPUT:
s->io_bits &= ~(1 << CR_CHAN(insn->chanspec));
break;
case INSN_CONFIG_DIO_QUERY:
data[1] = (s->io_bits & (1 << CR_CHAN(insn->chanspec))) ?
COMEDI_OUTPUT : COMEDI_INPUT;
return insn->n;
default:
return -EINVAL;
}
ni_writel(s->io_bits, M_Offset_DIO_Direction);
return 1;
}
static int ni_m_series_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
#ifdef DEBUG_DIO
printk("ni_m_series_dio_insn_bits() mask=0x%x bits=0x%x\n", data[0],
data[1]);
#endif
if (insn->n != 2)
return -EINVAL;
if (data[0]) {
s->state &= ~data[0];
s->state |= (data[0] & data[1]);
ni_writel(s->state, M_Offset_Static_Digital_Output);
}
data[1] = ni_readl(M_Offset_Static_Digital_Input);
return 2;
}
static int ni_cdio_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
int err = 0;
int tmp;
int sources;
unsigned i;
/* step 1: make sure trigger sources are trivially valid */
tmp = cmd->start_src;
sources = TRIG_INT;
cmd->start_src &= sources;
if (!cmd->start_src || tmp != cmd->start_src)
err++;
tmp = cmd->scan_begin_src;
cmd->scan_begin_src &= TRIG_EXT;
if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
err++;
tmp = cmd->convert_src;
cmd->convert_src &= TRIG_NOW;
if (!cmd->convert_src || tmp != cmd->convert_src)
err++;
tmp = cmd->scan_end_src;
cmd->scan_end_src &= TRIG_COUNT;
if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
err++;
tmp = cmd->stop_src;
cmd->stop_src &= TRIG_NONE;
if (!cmd->stop_src || tmp != cmd->stop_src)
err++;
if (err)
return 1;
/* step 2: make sure trigger sources are unique... */
if (cmd->start_src != TRIG_INT)
err++;
if (cmd->scan_begin_src != TRIG_EXT)
err++;
if (cmd->convert_src != TRIG_NOW)
err++;
if (cmd->stop_src != TRIG_NONE)
err++;
/* ... and mutually compatible */
if (err)
return 2;
/* step 3: make sure arguments are trivially compatible */
if (cmd->start_src == TRIG_INT) {
if (cmd->start_arg != 0) {
cmd->start_arg = 0;
err++;
}
}
if (cmd->scan_begin_src == TRIG_EXT) {
tmp = cmd->scan_begin_arg;
tmp &= CR_PACK_FLAGS(CDO_Sample_Source_Select_Mask, 0, 0,
CR_INVERT);
if (tmp != cmd->scan_begin_arg) {
err++;
}
}
if (cmd->convert_src == TRIG_NOW) {
if (cmd->convert_arg) {
cmd->convert_arg = 0;
err++;
}
}
if (cmd->scan_end_arg != cmd->chanlist_len) {
cmd->scan_end_arg = cmd->chanlist_len;
err++;
}
if (cmd->stop_src == TRIG_NONE) {
if (cmd->stop_arg != 0) {
cmd->stop_arg = 0;
err++;
}
}
if (err)
return 3;
/* step 4: fix up any arguments */
if (err)
return 4;
/* step 5: check chanlist */
for (i = 0; i < cmd->chanlist_len; ++i) {
if (cmd->chanlist[i] != i)
err = 1;
}
if (err)
return 5;
return 0;
}
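/*
* The correlated digital output (CDO) command path on m-series boards
* works in two stages: ni_cdio_cmd() resets the CDO subsystem, programs
* the sample clock source and output mask, and registers ni_cdo_inttrig()
* as the internal trigger. Nothing is output until userspace sends the
* internal trigger, at which point ni_cdo_inttrig() arms DMA, waits for
* the output FIFO to fill, and finally arms the CDO state machine.
*/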
static int ni_cdio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
const struct comedi_cmd *cmd = &s->async->cmd;
unsigned cdo_mode_bits = CDO_FIFO_Mode_Bit | CDO_Halt_On_Error_Bit;
int retval;
ni_writel(CDO_Reset_Bit, M_Offset_CDIO_Command);
switch (cmd->scan_begin_src) {
case TRIG_EXT:
cdo_mode_bits |=
CR_CHAN(cmd->scan_begin_arg) &
CDO_Sample_Source_Select_Mask;
break;
default:
BUG();
break;
}
if (cmd->scan_begin_arg & CR_INVERT)
cdo_mode_bits |= CDO_Polarity_Bit;
ni_writel(cdo_mode_bits, M_Offset_CDO_Mode);
if (s->io_bits) {
ni_writel(s->state, M_Offset_CDO_FIFO_Data);
ni_writel(CDO_SW_Update_Bit, M_Offset_CDIO_Command);
ni_writel(s->io_bits, M_Offset_CDO_Mask_Enable);
} else {
comedi_error(dev,
"attempted to run digital output command with no lines configured as outputs");
return -EIO;
}
retval = ni_request_cdo_mite_channel(dev);
if (retval < 0) {
return retval;
}
s->async->inttrig = &ni_cdo_inttrig;
return 0;
}
static int ni_cdo_inttrig(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int trignum)
{
#ifdef PCIDMA
unsigned long flags;
#endif
int retval = 0;
unsigned i;
const unsigned timeout = 1000;
s->async->inttrig = NULL;
/* read alloc the entire buffer */
comedi_buf_read_alloc(s->async, s->async->prealloc_bufsz);
#ifdef PCIDMA
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->cdo_mite_chan) {
mite_prep_dma(devpriv->cdo_mite_chan, 32, 32);
mite_dma_arm(devpriv->cdo_mite_chan);
} else {
comedi_error(dev, "BUG: no cdo mite channel?");
retval = -EIO;
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
if (retval < 0)
return retval;
#endif
/*
* XXX not sure what interrupt C group does
* ni_writeb(Interrupt_Group_C_Enable_Bit,
* M_Offset_Interrupt_C_Enable); wait for dma to fill output fifo
*/
for (i = 0; i < timeout; ++i) {
if (ni_readl(M_Offset_CDIO_Status) & CDO_FIFO_Full_Bit)
break;
udelay(10);
}
if (i == timeout) {
comedi_error(dev, "dma failed to fill cdo fifo!");
ni_cdio_cancel(dev, s);
return -EIO;
}
ni_writel(CDO_Arm_Bit | CDO_Error_Interrupt_Enable_Set_Bit |
CDO_Empty_FIFO_Interrupt_Enable_Set_Bit,
M_Offset_CDIO_Command);
return retval;
}
static int ni_cdio_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
ni_writel(CDO_Disarm_Bit | CDO_Error_Interrupt_Enable_Clear_Bit |
CDO_Empty_FIFO_Interrupt_Enable_Clear_Bit |
CDO_FIFO_Request_Interrupt_Enable_Clear_Bit,
M_Offset_CDIO_Command);
/*
* XXX not sure what interrupt C group does ni_writeb(0,
* M_Offset_Interrupt_C_Enable);
*/
ni_writel(0, M_Offset_CDO_Mask_Enable);
ni_release_cdo_mite_channel(dev);
return 0;
}
static void handle_cdio_interrupt(struct comedi_device *dev)
{
unsigned cdio_status;
struct comedi_subdevice *s = dev->subdevices + NI_DIO_SUBDEV;
#ifdef PCIDMA
unsigned long flags;
#endif
if ((boardtype.reg_type & ni_reg_m_series_mask) == 0) {
return;
}
#ifdef PCIDMA
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->cdo_mite_chan) {
unsigned cdo_mite_status =
mite_get_status(devpriv->cdo_mite_chan);
if (cdo_mite_status & CHSR_LINKC) {
writel(CHOR_CLRLC,
devpriv->mite->mite_io_addr +
MITE_CHOR(devpriv->cdo_mite_chan->channel));
}
mite_sync_output_dma(devpriv->cdo_mite_chan, s->async);
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
#endif
cdio_status = ni_readl(M_Offset_CDIO_Status);
if (cdio_status & (CDO_Overrun_Bit | CDO_Underflow_Bit)) {
/* printk("cdio error: statux=0x%x\n", cdio_status); */
ni_writel(CDO_Error_Interrupt_Confirm_Bit, M_Offset_CDIO_Command); /* XXX just guessing this is needed and does something useful */
s->async->events |= COMEDI_CB_OVERFLOW;
}
if (cdio_status & CDO_FIFO_Empty_Bit) {
/* printk("cdio fifo empty\n"); */
ni_writel(CDO_Empty_FIFO_Interrupt_Enable_Clear_Bit,
M_Offset_CDIO_Command);
/* s->async->events |= COMEDI_CB_EOA; */
}
ni_event(dev, s);
}
static int ni_serial_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int err = insn->n;
unsigned char byte_out, byte_in = 0;
if (insn->n != 2)
return -EINVAL;
switch (data[0]) {
case INSN_CONFIG_SERIAL_CLOCK:
#ifdef DEBUG_DIO
printk("SPI serial clock Config cd\n", data[1]);
#endif
devpriv->serial_hw_mode = 1;
devpriv->dio_control |= DIO_HW_Serial_Enable;
if (data[1] == SERIAL_DISABLED) {
devpriv->serial_hw_mode = 0;
devpriv->dio_control &= ~(DIO_HW_Serial_Enable |
DIO_Software_Serial_Control);
data[1] = SERIAL_DISABLED;
devpriv->serial_interval_ns = data[1];
} else if (data[1] <= SERIAL_600NS) {
/* Warning: this clock speed is too fast to reliably
control SCXI. */
devpriv->dio_control &= ~DIO_HW_Serial_Timebase;
devpriv->clock_and_fout |= Slow_Internal_Timebase;
devpriv->clock_and_fout &= ~DIO_Serial_Out_Divide_By_2;
data[1] = SERIAL_600NS;
devpriv->serial_interval_ns = data[1];
} else if (data[1] <= SERIAL_1_2US) {
devpriv->dio_control &= ~DIO_HW_Serial_Timebase;
devpriv->clock_and_fout |= Slow_Internal_Timebase |
DIO_Serial_Out_Divide_By_2;
data[1] = SERIAL_1_2US;
devpriv->serial_interval_ns = data[1];
} else if (data[1] <= SERIAL_10US) {
devpriv->dio_control |= DIO_HW_Serial_Timebase;
devpriv->clock_and_fout |= Slow_Internal_Timebase |
DIO_Serial_Out_Divide_By_2;
/* Note: DIO_Serial_Out_Divide_By_2 only affects
600ns/1.2us. If you turn divide_by_2 off with the
slow clock, you will still get 10us, except then
all your delays are wrong. */
data[1] = SERIAL_10US;
devpriv->serial_interval_ns = data[1];
} else {
devpriv->dio_control &= ~(DIO_HW_Serial_Enable |
DIO_Software_Serial_Control);
devpriv->serial_hw_mode = 0;
data[1] = (data[1] / 1000) * 1000;
devpriv->serial_interval_ns = data[1];
}
devpriv->stc_writew(dev, devpriv->dio_control,
DIO_Control_Register);
devpriv->stc_writew(dev, devpriv->clock_and_fout,
Clock_and_FOUT_Register);
return 1;
case INSN_CONFIG_BIDIRECTIONAL_DATA:
if (devpriv->serial_interval_ns == 0) {
return -EINVAL;
}
byte_out = data[1] & 0xFF;
if (devpriv->serial_hw_mode) {
err = ni_serial_hw_readwrite8(dev, s, byte_out,
&byte_in);
} else if (devpriv->serial_interval_ns > 0) {
err = ni_serial_sw_readwrite8(dev, s, byte_out,
&byte_in);
} else {
printk("ni_serial_insn_config: serial disabled!\n");
return -EINVAL;
}
if (err < 0)
return err;
data[1] = byte_in & 0xFF;
return insn->n;
default:
return -EINVAL;
}
}
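/*
* Sketch (not part of this driver) of how userspace might drive this
* serial subdevice via comedilib's comedi_do_insn(): first an INSN_CONFIG
* with data[0] = INSN_CONFIG_SERIAL_CLOCK and the desired bit interval in
* data[1] (the handler above rounds it to 600ns, 1.2us, 10us, or a
* software-timed multiple of 1us and writes the rounded value back), then
* an INSN_CONFIG with data[0] = INSN_CONFIG_BIDIRECTIONAL_DATA and the
* byte to send in data[1], with the received byte read back from data[1].
*/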
static int ni_serial_hw_readwrite8(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned char data_out,
unsigned char *data_in)
{
unsigned int status1;
int err = 0, count = 20;
#ifdef DEBUG_DIO
printk("ni_serial_hw_readwrite8: outputting 0x%x\n", data_out);
#endif
devpriv->dio_output &= ~DIO_Serial_Data_Mask;
devpriv->dio_output |= DIO_Serial_Data_Out(data_out);
devpriv->stc_writew(dev, devpriv->dio_output, DIO_Output_Register);
status1 = devpriv->stc_readw(dev, Joint_Status_1_Register);
if (status1 & DIO_Serial_IO_In_Progress_St) {
err = -EBUSY;
goto Error;
}
devpriv->dio_control |= DIO_HW_Serial_Start;
devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register);
devpriv->dio_control &= ~DIO_HW_Serial_Start;
/* Wait until STC says we're done, but don't loop infinitely. */
while ((status1 = devpriv->stc_readw(dev, Joint_Status_1_Register)) &
DIO_Serial_IO_In_Progress_St) {
/* Delay one bit per loop */
udelay((devpriv->serial_interval_ns + 999) / 1000);
if (--count < 0) {
printk
("ni_serial_hw_readwrite8: SPI serial I/O didn't finish in time!\n");
err = -ETIME;
goto Error;
}
}
/* Delay for last bit. This delay is absolutely necessary, because
DIO_Serial_IO_In_Progress_St goes high one bit too early. */
udelay((devpriv->serial_interval_ns + 999) / 1000);
if (data_in != NULL) {
*data_in = devpriv->stc_readw(dev, DIO_Serial_Input_Register);
#ifdef DEBUG_DIO
printk("ni_serial_hw_readwrite8: inputted 0x%x\n", *data_in);
#endif
}
Error:
devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register);
return err;
}
static int ni_serial_sw_readwrite8(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned char data_out,
unsigned char *data_in)
{
unsigned char mask, input = 0;
#ifdef DEBUG_DIO
printk("ni_serial_sw_readwrite8: outputting 0x%x\n", data_out);
#endif
/* Wait for one bit before transfer */
udelay((devpriv->serial_interval_ns + 999) / 1000);
for (mask = 0x80; mask; mask >>= 1) {
/* Output current bit; note that we cannot touch s->state
because it is a per-subdevice field, and serial is
a separate subdevice from DIO. */
devpriv->dio_output &= ~DIO_SDOUT;
if (data_out & mask) {
devpriv->dio_output |= DIO_SDOUT;
}
devpriv->stc_writew(dev, devpriv->dio_output,
DIO_Output_Register);
/* Assert SDCLK (active low, inverted), wait for half of
the delay, deassert SDCLK, and wait for the other half. */
devpriv->dio_control |= DIO_Software_Serial_Control;
devpriv->stc_writew(dev, devpriv->dio_control,
DIO_Control_Register);
udelay((devpriv->serial_interval_ns + 999) / 2000);
devpriv->dio_control &= ~DIO_Software_Serial_Control;
devpriv->stc_writew(dev, devpriv->dio_control,
DIO_Control_Register);
udelay((devpriv->serial_interval_ns + 999) / 2000);
/* Input current bit */
if (devpriv->stc_readw(dev,
DIO_Parallel_Input_Register) & DIO_SDIN)
{
/* printk("DIO_P_I_R: 0x%x\n", devpriv->stc_readw(dev, DIO_Parallel_Input_Register)); */
input |= mask;
}
}
#ifdef DEBUG_DIO
printk("ni_serial_sw_readwrite8: inputted 0x%x\n", input);
#endif
if (data_in)
*data_in = input;
return 0;
}
static void mio_common_detach(struct comedi_device *dev)
{
if (dev->private) {
if (devpriv->counter_dev) {
ni_gpct_device_destroy(devpriv->counter_dev);
}
}
if (dev->subdevices && boardtype.has_8255)
subdev_8255_cleanup(dev, dev->subdevices + NI_8255_DIO_SUBDEV);
}
static void init_ao_67xx(struct comedi_device *dev, struct comedi_subdevice *s)
{
int i;
for (i = 0; i < s->n_chan; i++) {
ni_ao_win_outw(dev, AO_Channel(i) | 0x0,
AO_Configuration_2_67xx);
}
ao_win_out(0x0, AO_Later_Single_Point_Updates);
}
static unsigned ni_gpct_to_stc_register(enum ni_gpct_register reg)
{
unsigned stc_register;
switch (reg) {
case NITIO_G0_Autoincrement_Reg:
stc_register = G_Autoincrement_Register(0);
break;
case NITIO_G1_Autoincrement_Reg:
stc_register = G_Autoincrement_Register(1);
break;
case NITIO_G0_Command_Reg:
stc_register = G_Command_Register(0);
break;
case NITIO_G1_Command_Reg:
stc_register = G_Command_Register(1);
break;
case NITIO_G0_HW_Save_Reg:
stc_register = G_HW_Save_Register(0);
break;
case NITIO_G1_HW_Save_Reg:
stc_register = G_HW_Save_Register(1);
break;
case NITIO_G0_SW_Save_Reg:
stc_register = G_Save_Register(0);
break;
case NITIO_G1_SW_Save_Reg:
stc_register = G_Save_Register(1);
break;
case NITIO_G0_Mode_Reg:
stc_register = G_Mode_Register(0);
break;
case NITIO_G1_Mode_Reg:
stc_register = G_Mode_Register(1);
break;
case NITIO_G0_LoadA_Reg:
stc_register = G_Load_A_Register(0);
break;
case NITIO_G1_LoadA_Reg:
stc_register = G_Load_A_Register(1);
break;
case NITIO_G0_LoadB_Reg:
stc_register = G_Load_B_Register(0);
break;
case NITIO_G1_LoadB_Reg:
stc_register = G_Load_B_Register(1);
break;
case NITIO_G0_Input_Select_Reg:
stc_register = G_Input_Select_Register(0);
break;
case NITIO_G1_Input_Select_Reg:
stc_register = G_Input_Select_Register(1);
break;
case NITIO_G01_Status_Reg:
stc_register = G_Status_Register;
break;
case NITIO_G01_Joint_Reset_Reg:
stc_register = Joint_Reset_Register;
break;
case NITIO_G01_Joint_Status1_Reg:
stc_register = Joint_Status_1_Register;
break;
case NITIO_G01_Joint_Status2_Reg:
stc_register = Joint_Status_2_Register;
break;
case NITIO_G0_Interrupt_Acknowledge_Reg:
stc_register = Interrupt_A_Ack_Register;
break;
case NITIO_G1_Interrupt_Acknowledge_Reg:
stc_register = Interrupt_B_Ack_Register;
break;
case NITIO_G0_Status_Reg:
stc_register = AI_Status_1_Register;
break;
case NITIO_G1_Status_Reg:
stc_register = AO_Status_1_Register;
break;
case NITIO_G0_Interrupt_Enable_Reg:
stc_register = Interrupt_A_Enable_Register;
break;
case NITIO_G1_Interrupt_Enable_Reg:
stc_register = Interrupt_B_Enable_Register;
break;
default:
printk("%s: unhandled register 0x%x in switch.\n",
__func__, reg);
BUG();
return 0;
}
return stc_register;
}
static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits,
enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
unsigned stc_register;
/* bits in the join reset register which are relevant to counters */
static const unsigned gpct_joint_reset_mask = G0_Reset | G1_Reset;
static const unsigned gpct_interrupt_a_enable_mask =
G0_Gate_Interrupt_Enable | G0_TC_Interrupt_Enable;
static const unsigned gpct_interrupt_b_enable_mask =
G1_Gate_Interrupt_Enable | G1_TC_Interrupt_Enable;
switch (reg) {
/* m-series-only registers */
case NITIO_G0_Counting_Mode_Reg:
ni_writew(bits, M_Offset_G0_Counting_Mode);
break;
case NITIO_G1_Counting_Mode_Reg:
ni_writew(bits, M_Offset_G1_Counting_Mode);
break;
case NITIO_G0_Second_Gate_Reg:
ni_writew(bits, M_Offset_G0_Second_Gate);
break;
case NITIO_G1_Second_Gate_Reg:
ni_writew(bits, M_Offset_G1_Second_Gate);
break;
case NITIO_G0_DMA_Config_Reg:
ni_writew(bits, M_Offset_G0_DMA_Config);
break;
case NITIO_G1_DMA_Config_Reg:
ni_writew(bits, M_Offset_G1_DMA_Config);
break;
case NITIO_G0_ABZ_Reg:
ni_writew(bits, M_Offset_G0_MSeries_ABZ);
break;
case NITIO_G1_ABZ_Reg:
ni_writew(bits, M_Offset_G1_MSeries_ABZ);
break;
/* 32 bit registers */
case NITIO_G0_LoadA_Reg:
case NITIO_G1_LoadA_Reg:
case NITIO_G0_LoadB_Reg:
case NITIO_G1_LoadB_Reg:
stc_register = ni_gpct_to_stc_register(reg);
devpriv->stc_writel(dev, bits, stc_register);
break;
/* 16 bit registers */
case NITIO_G0_Interrupt_Enable_Reg:
BUG_ON(bits & ~gpct_interrupt_a_enable_mask);
ni_set_bitfield(dev, Interrupt_A_Enable_Register,
gpct_interrupt_a_enable_mask, bits);
break;
case NITIO_G1_Interrupt_Enable_Reg:
BUG_ON(bits & ~gpct_interrupt_b_enable_mask);
ni_set_bitfield(dev, Interrupt_B_Enable_Register,
gpct_interrupt_b_enable_mask, bits);
break;
case NITIO_G01_Joint_Reset_Reg:
BUG_ON(bits & ~gpct_joint_reset_mask);
/* fall-through */
default:
stc_register = ni_gpct_to_stc_register(reg);
devpriv->stc_writew(dev, bits, stc_register);
}
}
static unsigned ni_gpct_read_register(struct ni_gpct *counter,
enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
unsigned stc_register;
switch (reg) {
/* m-series only registers */
case NITIO_G0_DMA_Status_Reg:
return ni_readw(M_Offset_G0_DMA_Status);
case NITIO_G1_DMA_Status_Reg:
return ni_readw(M_Offset_G1_DMA_Status);
/* 32 bit registers */
case NITIO_G0_HW_Save_Reg:
case NITIO_G1_HW_Save_Reg:
case NITIO_G0_SW_Save_Reg:
case NITIO_G1_SW_Save_Reg:
stc_register = ni_gpct_to_stc_register(reg);
return devpriv->stc_readl(dev, stc_register);
/* 16 bit registers */
default:
stc_register = ni_gpct_to_stc_register(reg);
return devpriv->stc_readw(dev, stc_register);
}
}
static int ni_freq_out_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
data[0] = devpriv->clock_and_fout & FOUT_Divider_mask;
return 1;
}
static int ni_freq_out_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
devpriv->clock_and_fout &= ~FOUT_Enable;
devpriv->stc_writew(dev, devpriv->clock_and_fout,
Clock_and_FOUT_Register);
devpriv->clock_and_fout &= ~FOUT_Divider_mask;
devpriv->clock_and_fout |= FOUT_Divider(data[0]);
devpriv->clock_and_fout |= FOUT_Enable;
devpriv->stc_writew(dev, devpriv->clock_and_fout,
Clock_and_FOUT_Register);
return insn->n;
}
static int ni_set_freq_out_clock(struct comedi_device *dev,
unsigned int clock_source)
{
switch (clock_source) {
case NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC:
devpriv->clock_and_fout &= ~FOUT_Timebase_Select;
break;
case NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC:
devpriv->clock_and_fout |= FOUT_Timebase_Select;
break;
default:
return -EINVAL;
}
devpriv->stc_writew(dev, devpriv->clock_and_fout,
Clock_and_FOUT_Register);
return 3;
}
static void ni_get_freq_out_clock(struct comedi_device *dev,
unsigned int *clock_source,
unsigned int *clock_period_ns)
{
if (devpriv->clock_and_fout & FOUT_Timebase_Select) {
*clock_source = NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC;
*clock_period_ns = TIMEBASE_2_NS;
} else {
*clock_source = NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC;
*clock_period_ns = TIMEBASE_1_NS * 2;
}
}
static int ni_freq_out_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
switch (data[0]) {
case INSN_CONFIG_SET_CLOCK_SRC:
return ni_set_freq_out_clock(dev, data[1]);
case INSN_CONFIG_GET_CLOCK_SRC:
ni_get_freq_out_clock(dev, &data[1], &data[2]);
return 3;
default:
break;
}
return -EINVAL;
}
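/*
* The frequency output pin divides the selected timebase down by the
* 4-bit divider written in ni_freq_out_insn_write() above. With the
* default TIMEBASE_1/2 source this is, assuming the usual 20 MHz
* TIMEBASE_1 (TIMEBASE_1_NS == 50), a 10 MHz input clock, so small
* divider values already give outputs in the MHz range; TIMEBASE_2
* provides a much slower reference for low output frequencies.
*/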
static int ni_alloc_private(struct comedi_device *dev)
{
int ret;
ret = alloc_private(dev, sizeof(struct ni_private));
if (ret < 0)
return ret;
spin_lock_init(&devpriv->window_lock);
spin_lock_init(&devpriv->soft_reg_copy_lock);
spin_lock_init(&devpriv->mite_channel_lock);
return 0;
}
static int ni_E_init(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
unsigned j;
enum ni_gpct_variant counter_variant;
if (boardtype.n_aochan > MAX_N_AO_CHAN) {
printk("bug! boardtype.n_aochan > MAX_N_AO_CHAN\n");
return -EINVAL;
}
if (alloc_subdevices(dev, NI_NUM_SUBDEVICES) < 0)
return -ENOMEM;
/* analog input subdevice */
s = dev->subdevices + NI_AI_SUBDEV;
dev->read_subdev = s;
if (boardtype.n_adchan) {
s->type = COMEDI_SUBD_AI;
s->subdev_flags =
SDF_READABLE | SDF_DIFF | SDF_DITHER | SDF_CMD_READ;
if (boardtype.reg_type != ni_reg_611x)
s->subdev_flags |= SDF_GROUND | SDF_COMMON | SDF_OTHER;
if (boardtype.adbits > 16)
s->subdev_flags |= SDF_LSAMPL;
if (boardtype.reg_type & ni_reg_m_series_mask)
s->subdev_flags |= SDF_SOFT_CALIBRATED;
s->n_chan = boardtype.n_adchan;
s->len_chanlist = 512;
s->maxdata = (1 << boardtype.adbits) - 1;
s->range_table = ni_range_lkup[boardtype.gainlkup];
s->insn_read = &ni_ai_insn_read;
s->insn_config = &ni_ai_insn_config;
s->do_cmdtest = &ni_ai_cmdtest;
s->do_cmd = &ni_ai_cmd;
s->cancel = &ni_ai_reset;
s->poll = &ni_ai_poll;
s->munge = &ni_ai_munge;
#ifdef PCIDMA
s->async_dma_dir = DMA_FROM_DEVICE;
#endif
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/* analog output subdevice */
s = dev->subdevices + NI_AO_SUBDEV;
if (boardtype.n_aochan) {
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE | SDF_DEGLITCH | SDF_GROUND;
if (boardtype.reg_type & ni_reg_m_series_mask)
s->subdev_flags |= SDF_SOFT_CALIBRATED;
s->n_chan = boardtype.n_aochan;
s->maxdata = (1 << boardtype.aobits) - 1;
s->range_table = boardtype.ao_range_table;
s->insn_read = &ni_ao_insn_read;
if (boardtype.reg_type & ni_reg_6xxx_mask) {
s->insn_write = &ni_ao_insn_write_671x;
} else {
s->insn_write = &ni_ao_insn_write;
}
s->insn_config = &ni_ao_insn_config;
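/* The next two conditionals share one closing brace: with PCIDMA any
* board with AO channels gets command support, while without DMA only
* boards with an AO FIFO can stream output samples. */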
#ifdef PCIDMA
if (boardtype.n_aochan) {
s->async_dma_dir = DMA_TO_DEVICE;
#else
if (boardtype.ao_fifo_depth) {
#endif
dev->write_subdev = s;
s->subdev_flags |= SDF_CMD_WRITE;
s->do_cmd = &ni_ao_cmd;
s->do_cmdtest = &ni_ao_cmdtest;
s->len_chanlist = boardtype.n_aochan;
if ((boardtype.reg_type & ni_reg_m_series_mask) == 0)
s->munge = ni_ao_munge;
}
s->cancel = &ni_ao_reset;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
if ((boardtype.reg_type & ni_reg_67xx_mask))
init_ao_67xx(dev, s);
/* digital i/o subdevice */
s = dev->subdevices + NI_DIO_SUBDEV;
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
s->maxdata = 1;
s->io_bits = 0; /* all bits input */
s->range_table = &range_digital;
s->n_chan = boardtype.num_p0_dio_channels;
if (boardtype.reg_type & ni_reg_m_series_mask) {
s->subdev_flags |=
SDF_LSAMPL | SDF_CMD_WRITE /* | SDF_CMD_READ */ ;
s->insn_bits = &ni_m_series_dio_insn_bits;
s->insn_config = &ni_m_series_dio_insn_config;
s->do_cmd = &ni_cdio_cmd;
s->do_cmdtest = &ni_cdio_cmdtest;
s->cancel = &ni_cdio_cancel;
s->async_dma_dir = DMA_BIDIRECTIONAL;
s->len_chanlist = s->n_chan;
ni_writel(CDO_Reset_Bit | CDI_Reset_Bit, M_Offset_CDIO_Command);
ni_writel(s->io_bits, M_Offset_DIO_Direction);
} else {
s->insn_bits = &ni_dio_insn_bits;
s->insn_config = &ni_dio_insn_config;
devpriv->dio_control = DIO_Pins_Dir(s->io_bits);
ni_writew(devpriv->dio_control, DIO_Control_Register);
}
/* 8255 device */
s = dev->subdevices + NI_8255_DIO_SUBDEV;
if (boardtype.has_8255) {
subdev_8255_init(dev, s, ni_8255_callback, (unsigned long)dev);
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/* formerly general purpose counter/timer device, but no longer used */
s = dev->subdevices + NI_UNUSED_SUBDEV;
s->type = COMEDI_SUBD_UNUSED;
/* calibration subdevice -- ai and ao */
s = dev->subdevices + NI_CALIBRATION_SUBDEV;
s->type = COMEDI_SUBD_CALIB;
if (boardtype.reg_type & ni_reg_m_series_mask) {
/* internal PWM analog output used for AI nonlinearity calibration */
s->subdev_flags = SDF_INTERNAL;
s->insn_config = &ni_m_series_pwm_config;
s->n_chan = 1;
s->maxdata = 0;
ni_writel(0x0, M_Offset_Cal_PWM);
} else if (boardtype.reg_type == ni_reg_6143) {
/* internal PWM analog output used for AI nonlinearity calibration */
s->subdev_flags = SDF_INTERNAL;
s->insn_config = &ni_6143_pwm_config;
s->n_chan = 1;
s->maxdata = 0;
} else {
s->subdev_flags = SDF_WRITABLE | SDF_INTERNAL;
s->insn_read = &ni_calib_insn_read;
s->insn_write = &ni_calib_insn_write;
caldac_setup(dev, s);
}
/* EEPROM */
s = dev->subdevices + NI_EEPROM_SUBDEV;
s->type = COMEDI_SUBD_MEMORY;
s->subdev_flags = SDF_READABLE | SDF_INTERNAL;
s->maxdata = 0xff;
if (boardtype.reg_type & ni_reg_m_series_mask) {
s->n_chan = M_SERIES_EEPROM_SIZE;
s->insn_read = &ni_m_series_eeprom_insn_read;
} else {
s->n_chan = 512;
s->insn_read = &ni_eeprom_insn_read;
}
/* PFI */
s = dev->subdevices + NI_PFI_DIO_SUBDEV;
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
if (boardtype.reg_type & ni_reg_m_series_mask) {
unsigned i;
s->n_chan = 16;
ni_writew(s->state, M_Offset_PFI_DO);
for (i = 0; i < NUM_PFI_OUTPUT_SELECT_REGS; ++i) {
ni_writew(devpriv->pfi_output_select_reg[i],
M_Offset_PFI_Output_Select(i + 1));
}
} else {
s->n_chan = 10;
}
s->maxdata = 1;
if (boardtype.reg_type & ni_reg_m_series_mask) {
s->insn_bits = &ni_pfi_insn_bits;
}
s->insn_config = &ni_pfi_insn_config;
ni_set_bits(dev, IO_Bidirection_Pin_Register, ~0, 0);
/* cs5529 calibration adc */
s = dev->subdevices + NI_CS5529_CALIBRATION_SUBDEV;
if (boardtype.reg_type & ni_reg_67xx_mask) {
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_INTERNAL;
/* one channel for each analog output channel */
s->n_chan = boardtype.n_aochan;
s->maxdata = (1 << 16) - 1;
s->range_table = &range_unknown; /* XXX */
s->insn_read = cs5529_ai_insn_read;
s->insn_config = NULL;
init_cs5529(dev);
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/* Serial */
s = dev->subdevices + NI_SERIAL_SUBDEV;
s->type = COMEDI_SUBD_SERIAL;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
s->n_chan = 1;
s->maxdata = 0xff;
s->insn_config = ni_serial_insn_config;
devpriv->serial_interval_ns = 0;
devpriv->serial_hw_mode = 0;
/* RTSI */
s = dev->subdevices + NI_RTSI_SUBDEV;
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
s->n_chan = 8;
s->maxdata = 1;
s->insn_bits = ni_rtsi_insn_bits;
s->insn_config = ni_rtsi_insn_config;
ni_rtsi_init(dev);
if (boardtype.reg_type & ni_reg_m_series_mask) {
counter_variant = ni_gpct_variant_m_series;
} else {
counter_variant = ni_gpct_variant_e_series;
}
devpriv->counter_dev = ni_gpct_device_construct(dev,
&ni_gpct_write_register,
&ni_gpct_read_register,
counter_variant,
NUM_GPCT);
/* General purpose counters */
for (j = 0; j < NUM_GPCT; ++j) {
s = dev->subdevices + NI_GPCT_SUBDEV(j);
s->type = COMEDI_SUBD_COUNTER;
s->subdev_flags =
SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL | SDF_CMD_READ
/* | SDF_CMD_WRITE */ ;
s->n_chan = 3;
if (boardtype.reg_type & ni_reg_m_series_mask)
s->maxdata = 0xffffffff;
else
s->maxdata = 0xffffff;
s->insn_read = &ni_gpct_insn_read;
s->insn_write = &ni_gpct_insn_write;
s->insn_config = &ni_gpct_insn_config;
s->do_cmd = &ni_gpct_cmd;
s->len_chanlist = 1;
s->do_cmdtest = &ni_gpct_cmdtest;
s->cancel = &ni_gpct_cancel;
s->async_dma_dir = DMA_BIDIRECTIONAL;
s->private = &devpriv->counter_dev->counters[j];
devpriv->counter_dev->counters[j].chip_index = 0;
devpriv->counter_dev->counters[j].counter_index = j;
ni_tio_init_counter(&devpriv->counter_dev->counters[j]);
}
/* Frequency output */
s = dev->subdevices + NI_FREQ_OUT_SUBDEV;
s->type = COMEDI_SUBD_COUNTER;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = 1;
s->maxdata = 0xf;
s->insn_read = &ni_freq_out_insn_read;
s->insn_write = &ni_freq_out_insn_write;
s->insn_config = &ni_freq_out_insn_config;
/* ai configuration */
ni_ai_reset(dev, dev->subdevices + NI_AI_SUBDEV);
if ((boardtype.reg_type & ni_reg_6xxx_mask) == 0) {
/* BEAM is this needed for PCI-6143 ?? */
devpriv->clock_and_fout =
Slow_Internal_Time_Divide_By_2 |
Slow_Internal_Timebase |
Clock_To_Board_Divide_By_2 |
Clock_To_Board |
AI_Output_Divide_By_2 | AO_Output_Divide_By_2;
} else {
devpriv->clock_and_fout =
Slow_Internal_Time_Divide_By_2 |
Slow_Internal_Timebase |
Clock_To_Board_Divide_By_2 | Clock_To_Board;
}
devpriv->stc_writew(dev, devpriv->clock_and_fout,
Clock_and_FOUT_Register);
/* analog output configuration */
ni_ao_reset(dev, dev->subdevices + NI_AO_SUBDEV);
if (dev->irq) {
devpriv->stc_writew(dev,
(IRQ_POLARITY ? Interrupt_Output_Polarity :
0) | (Interrupt_Output_On_3_Pins & 0) |
Interrupt_A_Enable | Interrupt_B_Enable |
Interrupt_A_Output_Select(interrupt_pin
(dev->irq)) |
Interrupt_B_Output_Select(interrupt_pin
(dev->irq)),
Interrupt_Control_Register);
}
/* DMA setup */
ni_writeb(devpriv->ai_ao_select_reg, AI_AO_Select);
ni_writeb(devpriv->g0_g1_select_reg, G0_G1_Select);
if (boardtype.reg_type & ni_reg_6xxx_mask) {
ni_writeb(0, Magic_611x);
} else if (boardtype.reg_type & ni_reg_m_series_mask) {
int channel;
for (channel = 0; channel < boardtype.n_aochan; ++channel) {
ni_writeb(0xf, M_Offset_AO_Waveform_Order(channel));
ni_writeb(0x0,
M_Offset_AO_Reference_Attenuation(channel));
}
ni_writeb(0x0, M_Offset_AO_Calibration);
}
printk("\n");
return 0;
}
static int ni_8255_callback(int dir, int port, int data, unsigned long arg)
{
struct comedi_device *dev = (struct comedi_device *)arg;
if (dir) {
ni_writeb(data, Port_A + 2 * port);
return 0;
} else {
return ni_readb(Port_A + 2 * port);
}
}
/*
presents the EEPROM as a subdevice
*/
static int ni_eeprom_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
data[0] = ni_read_eeprom(dev, CR_CHAN(insn->chanspec));
return 1;
}
/*
reads bytes out of eeprom
*/
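/*
* The EEPROM is bit-banged through the Serial_Command register: bit 0x04
* acts as chip select, bit 0x02 carries the data out and bit 0x01 is the
* clock. A read shifts a 16-bit command word (read opcode plus address
* bits, assembled into 'bitstring' below) out MSB first, then clocks
* eight result bits back in through the PROMOUT status flag.
*/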
static int ni_read_eeprom(struct comedi_device *dev, int addr)
{
int bit;
int bitstring;
bitstring = 0x0300 | ((addr & 0x100) << 3) | (addr & 0xff);
ni_writeb(0x04, Serial_Command);
for (bit = 0x8000; bit; bit >>= 1) {
ni_writeb(0x04 | ((bit & bitstring) ? 0x02 : 0),
Serial_Command);
ni_writeb(0x05 | ((bit & bitstring) ? 0x02 : 0),
Serial_Command);
}
bitstring = 0;
for (bit = 0x80; bit; bit >>= 1) {
ni_writeb(0x04, Serial_Command);
ni_writeb(0x05, Serial_Command);
bitstring |= ((ni_readb(XXX_Status) & PROMOUT) ? bit : 0);
}
ni_writeb(0x00, Serial_Command);
return bitstring;
}
static int ni_m_series_eeprom_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
data[0] = devpriv->eeprom_buffer[CR_CHAN(insn->chanspec)];
return 1;
}
static int ni_get_pwm_config(struct comedi_device *dev, unsigned int *data)
{
data[1] = devpriv->pwm_up_count * devpriv->clock_ns;
data[2] = devpriv->pwm_down_count * devpriv->clock_ns;
return 3;
}
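/*
* For INSN_CONFIG_PWM_OUTPUT below, data[1] and data[3] select the
* rounding mode (TRIG_ROUND_NEAREST/DOWN/UP) for the requested up and
* down times, which arrive in nanoseconds in data[2] and data[4]. If the
* times cannot be hit exactly with the board clock, the rounded values
* are written back and -EAGAIN is returned so the caller can see what it
* would actually get.
*/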
static int ni_m_series_pwm_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
unsigned up_count, down_count;
switch (data[0]) {
case INSN_CONFIG_PWM_OUTPUT:
switch (data[1]) {
case TRIG_ROUND_NEAREST:
up_count =
(data[2] +
devpriv->clock_ns / 2) / devpriv->clock_ns;
break;
case TRIG_ROUND_DOWN:
up_count = data[2] / devpriv->clock_ns;
break;
case TRIG_ROUND_UP:
up_count =
(data[2] + devpriv->clock_ns -
1) / devpriv->clock_ns;
break;
default:
return -EINVAL;
}
switch (data[3]) {
case TRIG_ROUND_NEAREST:
down_count =
(data[4] +
devpriv->clock_ns / 2) / devpriv->clock_ns;
break;
case TRIG_ROUND_DOWN:
down_count = data[4] / devpriv->clock_ns;
break;
case TRIG_ROUND_UP:
down_count =
(data[4] + devpriv->clock_ns -
1) / devpriv->clock_ns;
break;
default:
return -EINVAL;
}
if (up_count * devpriv->clock_ns != data[2] ||
down_count * devpriv->clock_ns != data[4]) {
data[2] = up_count * devpriv->clock_ns;
data[4] = down_count * devpriv->clock_ns;
return -EAGAIN;
}
ni_writel(MSeries_Cal_PWM_High_Time_Bits(up_count) |
MSeries_Cal_PWM_Low_Time_Bits(down_count),
M_Offset_Cal_PWM);
devpriv->pwm_up_count = up_count;
devpriv->pwm_down_count = down_count;
return 5;
case INSN_CONFIG_GET_PWM_OUTPUT:
return ni_get_pwm_config(dev, data);
default:
return -EINVAL;
}
return 0;
}
static int ni_6143_pwm_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
unsigned up_count, down_count;
switch (data[0]) {
case INSN_CONFIG_PWM_OUTPUT:
switch (data[1]) {
case TRIG_ROUND_NEAREST:
up_count =
(data[2] +
devpriv->clock_ns / 2) / devpriv->clock_ns;
break;
case TRIG_ROUND_DOWN:
up_count = data[2] / devpriv->clock_ns;
break;
case TRIG_ROUND_UP:
up_count =
(data[2] + devpriv->clock_ns -
1) / devpriv->clock_ns;
break;
default:
return -EINVAL;
}
switch (data[3]) {
case TRIG_ROUND_NEAREST:
down_count =
(data[4] +
devpriv->clock_ns / 2) / devpriv->clock_ns;
break;
case TRIG_ROUND_DOWN:
down_count = data[4] / devpriv->clock_ns;
break;
case TRIG_ROUND_UP:
down_count =
(data[4] + devpriv->clock_ns -
1) / devpriv->clock_ns;
break;
default:
return -EINVAL;
}
if (up_count * devpriv->clock_ns != data[2] ||
down_count * devpriv->clock_ns != data[4]) {
data[2] = up_count * devpriv->clock_ns;
data[4] = down_count * devpriv->clock_ns;
return -EAGAIN;
}
ni_writel(up_count, Calibration_HighTime_6143);
devpriv->pwm_up_count = up_count;
ni_writel(down_count, Calibration_LowTime_6143);
devpriv->pwm_down_count = down_count;
return 5;
case INSN_CONFIG_GET_PWM_OUTPUT:
return ni_get_pwm_config(dev, data);
default:
return -EINVAL;
}
return 0;
}
static void ni_write_caldac(struct comedi_device *dev, int addr, int val);
/*
calibration subdevice
*/
static int ni_calib_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
ni_write_caldac(dev, CR_CHAN(insn->chanspec), data[0]);
return 1;
}
static int ni_calib_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
data[0] = devpriv->caldacs[CR_CHAN(insn->chanspec)];
return 1;
}
static int pack_mb88341(int addr, int val, int *bitstring);
static int pack_dac8800(int addr, int val, int *bitstring);
static int pack_dac8043(int addr, int val, int *bitstring);
static int pack_ad8522(int addr, int val, int *bitstring);
static int pack_ad8804(int addr, int val, int *bitstring);
static int pack_ad8842(int addr, int val, int *bitstring);
struct caldac_struct {
int n_chans;
int n_bits;
int (*packbits) (int, int, int *);
};
static struct caldac_struct caldacs[] = {
[mb88341] = {12, 8, pack_mb88341},
[dac8800] = {8, 8, pack_dac8800},
[dac8043] = {1, 12, pack_dac8043},
[ad8522] = {2, 12, pack_ad8522},
[ad8804] = {12, 8, pack_ad8804},
[ad8842] = {8, 8, pack_ad8842},
[ad8804_debug] = {16, 8, pack_ad8804},
};
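/*
* Boards can carry up to three calibration DACs chained on the same
* serial lines. Each pack_*() helper encodes a channel/value pair into
* the chip-specific bitstream and returns the number of bits to clock
* out; ni_write_caldac() walks the chain, subtracting each chip's channel
* count from the address until it finds the chip that owns the channel.
*/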
static void caldac_setup(struct comedi_device *dev, struct comedi_subdevice *s)
{
int i, j;
int n_dacs;
int n_chans = 0;
int n_bits;
int diffbits = 0;
int type;
int chan;
type = boardtype.caldac[0];
if (type == caldac_none)
return;
n_bits = caldacs[type].n_bits;
for (i = 0; i < 3; i++) {
type = boardtype.caldac[i];
if (type == caldac_none)
break;
if (caldacs[type].n_bits != n_bits)
diffbits = 1;
n_chans += caldacs[type].n_chans;
}
n_dacs = i;
s->n_chan = n_chans;
if (diffbits) {
unsigned int *maxdata_list;
if (n_chans > MAX_N_CALDACS) {
printk("BUG! MAX_N_CALDACS too small\n");
}
s->maxdata_list = maxdata_list = devpriv->caldac_maxdata_list;
chan = 0;
for (i = 0; i < n_dacs; i++) {
type = boardtype.caldac[i];
for (j = 0; j < caldacs[type].n_chans; j++) {
maxdata_list[chan] =
(1 << caldacs[type].n_bits) - 1;
chan++;
}
}
for (chan = 0; chan < s->n_chan; chan++)
ni_write_caldac(dev, chan, s->maxdata_list[chan] / 2);
} else {
type = boardtype.caldac[0];
s->maxdata = (1 << caldacs[type].n_bits) - 1;
for (chan = 0; chan < s->n_chan; chan++)
ni_write_caldac(dev, chan, s->maxdata / 2);
}
}
static void ni_write_caldac(struct comedi_device *dev, int addr, int val)
{
unsigned int loadbit = 0, bits = 0, bit, bitstring = 0;
int i;
int type;
/* printk("ni_write_caldac: chan=%d val=%d\n",addr,val); */
if (devpriv->caldacs[addr] == val)
return;
devpriv->caldacs[addr] = val;
for (i = 0; i < 3; i++) {
type = boardtype.caldac[i];
if (type == caldac_none)
break;
if (addr < caldacs[type].n_chans) {
bits = caldacs[type].packbits(addr, val, &bitstring);
loadbit = SerDacLd(i);
/* printk("caldac: using i=%d addr=%d %x\n",i,addr,bitstring); */
break;
}
addr -= caldacs[type].n_chans;
}
for (bit = 1 << (bits - 1); bit; bit >>= 1) {
ni_writeb(((bit & bitstring) ? 0x02 : 0), Serial_Command);
udelay(1);
ni_writeb(1 | ((bit & bitstring) ? 0x02 : 0), Serial_Command);
udelay(1);
}
ni_writeb(loadbit, Serial_Command);
udelay(1);
ni_writeb(0, Serial_Command);
}
static int pack_mb88341(int addr, int val, int *bitstring)
{
/*
Fujitsu MB 88341
Note that address bits are reversed. Thanks to
Ingo Keen for noticing this.
Note also that the 88341 expects address values from
1-12, whereas we use channel numbers 0-11. The NI
docs use 1-12, also, so be careful here.
*/
addr++;
*bitstring = ((addr & 0x1) << 11) |
((addr & 0x2) << 9) |
((addr & 0x4) << 7) | ((addr & 0x8) << 5) | (val & 0xff);
return 12;
}
static int pack_dac8800(int addr, int val, int *bitstring)
{
*bitstring = ((addr & 0x7) << 8) | (val & 0xff);
return 11;
}
static int pack_dac8043(int addr, int val, int *bitstring)
{
*bitstring = val & 0xfff;
return 12;
}
static int pack_ad8522(int addr, int val, int *bitstring)
{
*bitstring = (val & 0xfff) | (addr ? 0xc000 : 0xa000);
return 16;
}
static int pack_ad8804(int addr, int val, int *bitstring)
{
*bitstring = ((addr & 0xf) << 8) | (val & 0xff);
return 12;
}
static int pack_ad8842(int addr, int val, int *bitstring)
{
*bitstring = ((addr + 1) << 8) | (val & 0xff);
return 12;
}
#if 0
/*
* Read the GPCTs current value.
*/
static int GPCT_G_Watch(struct comedi_device *dev, int chan)
{
unsigned int hi1, hi2, lo;
devpriv->gpct_command[chan] &= ~G_Save_Trace;
devpriv->stc_writew(dev, devpriv->gpct_command[chan],
G_Command_Register(chan));
devpriv->gpct_command[chan] |= G_Save_Trace;
devpriv->stc_writew(dev, devpriv->gpct_command[chan],
G_Command_Register(chan));
/* This procedure is used because the two registers cannot
* be read atomically. */
do {
hi1 = devpriv->stc_readw(dev, G_Save_Register_High(chan));
lo = devpriv->stc_readw(dev, G_Save_Register_Low(chan));
hi2 = devpriv->stc_readw(dev, G_Save_Register_High(chan));
} while (hi1 != hi2);
return (hi1 << 16) | lo;
}
static void GPCT_Reset(struct comedi_device *dev, int chan)
{
int temp_ack_reg = 0;
/* printk("GPCT_Reset..."); */
devpriv->gpct_cur_operation[chan] = GPCT_RESET;
switch (chan) {
case 0:
devpriv->stc_writew(dev, G0_Reset, Joint_Reset_Register);
ni_set_bits(dev, Interrupt_A_Enable_Register,
G0_TC_Interrupt_Enable, 0);
ni_set_bits(dev, Interrupt_A_Enable_Register,
G0_Gate_Interrupt_Enable, 0);
temp_ack_reg |= G0_Gate_Error_Confirm;
temp_ack_reg |= G0_TC_Error_Confirm;
temp_ack_reg |= G0_TC_Interrupt_Ack;
temp_ack_reg |= G0_Gate_Interrupt_Ack;
devpriv->stc_writew(dev, temp_ack_reg,
Interrupt_A_Ack_Register);
/* problem...this interferes with the other ctr... */
devpriv->an_trig_etc_reg |= GPFO_0_Output_Enable;
devpriv->stc_writew(dev, devpriv->an_trig_etc_reg,
Analog_Trigger_Etc_Register);
break;
case 1:
devpriv->stc_writew(dev, G1_Reset, Joint_Reset_Register);
ni_set_bits(dev, Interrupt_B_Enable_Register,
G1_TC_Interrupt_Enable, 0);
ni_set_bits(dev, Interrupt_B_Enable_Register,
G0_Gate_Interrupt_Enable, 0);
temp_ack_reg |= G1_Gate_Error_Confirm;
temp_ack_reg |= G1_TC_Error_Confirm;
temp_ack_reg |= G1_TC_Interrupt_Ack;
temp_ack_reg |= G1_Gate_Interrupt_Ack;
devpriv->stc_writew(dev, temp_ack_reg,
Interrupt_B_Ack_Register);
devpriv->an_trig_etc_reg |= GPFO_1_Output_Enable;
devpriv->stc_writew(dev, devpriv->an_trig_etc_reg,
Analog_Trigger_Etc_Register);
break;
}
devpriv->gpct_mode[chan] = 0;
devpriv->gpct_input_select[chan] = 0;
devpriv->gpct_command[chan] = 0;
devpriv->gpct_command[chan] |= G_Synchronized_Gate;
devpriv->stc_writew(dev, devpriv->gpct_mode[chan],
G_Mode_Register(chan));
devpriv->stc_writew(dev, devpriv->gpct_input_select[chan],
G_Input_Select_Register(chan));
devpriv->stc_writew(dev, 0, G_Autoincrement_Register(chan));
/* printk("exit GPCT_Reset\n"); */
}
#endif
static int ni_gpct_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct ni_gpct *counter = s->private;
return ni_tio_insn_config(counter, insn, data);
}
static int ni_gpct_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct ni_gpct *counter = s->private;
return ni_tio_rinsn(counter, insn, data);
}
static int ni_gpct_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct ni_gpct *counter = s->private;
return ni_tio_winsn(counter, insn, data);
}
static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
int retval;
#ifdef PCIDMA
struct ni_gpct *counter = s->private;
/* const struct comedi_cmd *cmd = &s->async->cmd; */
retval = ni_request_gpct_mite_channel(dev, counter->counter_index,
COMEDI_INPUT);
if (retval) {
comedi_error(dev,
"no dma channel available for use by counter");
return retval;
}
ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL);
ni_e_series_enable_second_irq(dev, counter->counter_index, 1);
retval = ni_tio_cmd(counter, s->async);
#else
retval = -ENOTSUPP;
#endif
return retval;
}
static int ni_gpct_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
#ifdef PCIDMA
struct ni_gpct *counter = s->private;
return ni_tio_cmdtest(counter, cmd);
#else
return -ENOTSUPP;
#endif
}
static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
#ifdef PCIDMA
struct ni_gpct *counter = s->private;
int retval;
retval = ni_tio_cancel(counter);
ni_e_series_enable_second_irq(dev, counter->counter_index, 0);
ni_release_gpct_mite_channel(dev, counter->counter_index);
return retval;
#else
return 0;
#endif
}
/*
*
* Programmable Function Inputs
*
*/
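/*
* On m-series boards each PFI pin has a programmable output source: the
* selections are packed three to a 16-bit register as 5-bit fields (hence
* the 'source & 0x1f' check and the chan / 3 register indexing below).
* Older E-series boards have fixed signals on the PFI pins, so "setting"
* a route there only succeeds if it matches what the pin already carries.
*/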
static int ni_m_series_set_pfi_routing(struct comedi_device *dev, unsigned chan,
unsigned source)
{
unsigned pfi_reg_index;
unsigned array_offset;
if ((source & 0x1f) != source)
return -EINVAL;
pfi_reg_index = 1 + chan / 3;
array_offset = pfi_reg_index - 1;
devpriv->pfi_output_select_reg[array_offset] &=
~MSeries_PFI_Output_Select_Mask(chan);
devpriv->pfi_output_select_reg[array_offset] |=
MSeries_PFI_Output_Select_Bits(chan, source);
ni_writew(devpriv->pfi_output_select_reg[array_offset],
M_Offset_PFI_Output_Select(pfi_reg_index));
return 2;
}
static int ni_old_set_pfi_routing(struct comedi_device *dev, unsigned chan,
unsigned source)
{
/* pre-m-series boards have fixed signals on pfi pins */
if (source != ni_old_get_pfi_routing(dev, chan))
return -EINVAL;
return 2;
}
static int ni_set_pfi_routing(struct comedi_device *dev, unsigned chan,
unsigned source)
{
if (boardtype.reg_type & ni_reg_m_series_mask)
return ni_m_series_set_pfi_routing(dev, chan, source);
else
return ni_old_set_pfi_routing(dev, chan, source);
}
static unsigned ni_m_series_get_pfi_routing(struct comedi_device *dev,
unsigned chan)
{
const unsigned array_offset = chan / 3;
return MSeries_PFI_Output_Select_Source(chan,
devpriv->pfi_output_select_reg[array_offset]);
}
static unsigned ni_old_get_pfi_routing(struct comedi_device *dev, unsigned chan)
{
/* pre-m-series boards have fixed signals on pfi pins */
switch (chan) {
case 0:
return NI_PFI_OUTPUT_AI_START1;
case 1:
return NI_PFI_OUTPUT_AI_START2;
case 2:
return NI_PFI_OUTPUT_AI_CONVERT;
case 3:
return NI_PFI_OUTPUT_G_SRC1;
case 4:
return NI_PFI_OUTPUT_G_GATE1;
case 5:
return NI_PFI_OUTPUT_AO_UPDATE_N;
case 6:
return NI_PFI_OUTPUT_AO_START1;
case 7:
return NI_PFI_OUTPUT_AI_START_PULSE;
case 8:
return NI_PFI_OUTPUT_G_SRC0;
case 9:
return NI_PFI_OUTPUT_G_GATE0;
default:
printk("%s: bug, unhandled case in switch.\n", __func__);
break;
}
return 0;
}
static unsigned ni_get_pfi_routing(struct comedi_device *dev, unsigned chan)
{
if (boardtype.reg_type & ni_reg_m_series_mask)
return ni_m_series_get_pfi_routing(dev, chan);
else
return ni_old_get_pfi_routing(dev, chan);
}
static int ni_config_filter(struct comedi_device *dev, unsigned pfi_channel,
enum ni_pfi_filter_select filter)
{
unsigned bits;
if ((boardtype.reg_type & ni_reg_m_series_mask) == 0) {
return -ENOTSUPP;
}
bits = ni_readl(M_Offset_PFI_Filter);
bits &= ~MSeries_PFI_Filter_Select_Mask(pfi_channel);
bits |= MSeries_PFI_Filter_Select_Bits(pfi_channel, filter);
ni_writel(bits, M_Offset_PFI_Filter);
return 0;
}
static int ni_pfi_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
if ((boardtype.reg_type & ni_reg_m_series_mask) == 0) {
return -ENOTSUPP;
}
if (data[0]) {
s->state &= ~data[0];
s->state |= (data[0] & data[1]);
ni_writew(s->state, M_Offset_PFI_DO);
}
data[1] = ni_readw(M_Offset_PFI_DI);
return 2;
}
static int ni_pfi_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
unsigned int chan;
if (insn->n < 1)
return -EINVAL;
chan = CR_CHAN(insn->chanspec);
switch (data[0]) {
case COMEDI_OUTPUT:
ni_set_bits(dev, IO_Bidirection_Pin_Register, 1 << chan, 1);
break;
case COMEDI_INPUT:
ni_set_bits(dev, IO_Bidirection_Pin_Register, 1 << chan, 0);
break;
case INSN_CONFIG_DIO_QUERY:
data[1] =
(devpriv->io_bidirection_pin_reg & (1 << chan)) ?
COMEDI_OUTPUT : COMEDI_INPUT;
return 0;
case INSN_CONFIG_SET_ROUTING:
return ni_set_pfi_routing(dev, chan, data[1]);
case INSN_CONFIG_GET_ROUTING:
data[1] = ni_get_pfi_routing(dev, chan);
break;
case INSN_CONFIG_FILTER:
return ni_config_filter(dev, chan, data[1]);
default:
return -EINVAL;
}
return 0;
}
/*
*
* NI RTSI Bus Functions
*
*/
static void ni_rtsi_init(struct comedi_device *dev)
{
/* Initialises the RTSI bus signal switch to a default state */
/* Set clock mode to internal */
devpriv->clock_and_fout2 = MSeries_RTSI_10MHz_Bit;
if (ni_set_master_clock(dev, NI_MIO_INTERNAL_CLOCK, 0) < 0) {
printk("ni_set_master_clock failed, bug?");
}
/* default internal lines routing to RTSI bus lines */
devpriv->rtsi_trig_a_output_reg =
RTSI_Trig_Output_Bits(0,
NI_RTSI_OUTPUT_ADR_START1) |
RTSI_Trig_Output_Bits(1,
NI_RTSI_OUTPUT_ADR_START2) |
RTSI_Trig_Output_Bits(2,
NI_RTSI_OUTPUT_SCLKG) |
RTSI_Trig_Output_Bits(3, NI_RTSI_OUTPUT_DACUPDN);
devpriv->stc_writew(dev, devpriv->rtsi_trig_a_output_reg,
RTSI_Trig_A_Output_Register);
devpriv->rtsi_trig_b_output_reg =
RTSI_Trig_Output_Bits(4,
NI_RTSI_OUTPUT_DA_START1) |
RTSI_Trig_Output_Bits(5,
NI_RTSI_OUTPUT_G_SRC0) |
RTSI_Trig_Output_Bits(6, NI_RTSI_OUTPUT_G_GATE0);
if (boardtype.reg_type & ni_reg_m_series_mask)
devpriv->rtsi_trig_b_output_reg |=
RTSI_Trig_Output_Bits(7, NI_RTSI_OUTPUT_RTSI_OSC);
devpriv->stc_writew(dev, devpriv->rtsi_trig_b_output_reg,
RTSI_Trig_B_Output_Register);
/*
* Sets the source and direction of the 4 on board lines
* devpriv->stc_writew(dev, 0x0000, RTSI_Board_Register);
*/
}
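/*
* This driver does not implement reading the RTSI trigger lines back as
* digital inputs; the insn_bits handler below just reports zeros, and the
* subdevice is mainly useful for its insn_config routing and clock
* controls.
*/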
static int ni_rtsi_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
if (insn->n != 2)
return -EINVAL;
data[1] = 0;
return 2;
}
/* Find best multiplier/divider to try and get the PLL running at 80 MHz
* given an arbitrary frequency input clock */
static int ni_mseries_get_pll_parameters(unsigned reference_period_ns,
unsigned *freq_divider,
unsigned *freq_multiplier,
unsigned *actual_period_ns)
{
unsigned div;
unsigned best_div = 1;
static const unsigned max_div = 0x10;
unsigned mult;
unsigned best_mult = 1;
static const unsigned max_mult = 0x100;
static const unsigned pico_per_nano = 1000;
const unsigned reference_picosec = reference_period_ns * pico_per_nano;
/* m-series wants the phase-locked loop to output 80MHz, which is divided by 4 to
* 20 MHz for most timing clocks */
static const unsigned target_picosec = 12500;
static const unsigned fudge_factor_80_to_20Mhz = 4;
int best_period_picosec = 0;
for (div = 1; div <= max_div; ++div) {
for (mult = 1; mult <= max_mult; ++mult) {
int new_period_ps =
(reference_picosec * div) / mult;
/* compare as signed quantities so abs() of the error is well defined */
if (abs(new_period_ps - (int)target_picosec) <
abs(best_period_picosec - (int)target_picosec)) {
best_period_picosec = new_period_ps;
best_div = div;
best_mult = mult;
}
}
}
if (best_period_picosec == 0) {
printk("%s: bug, failed to find pll parameters\n", __func__);
return -EIO;
}
*freq_divider = best_div;
*freq_multiplier = best_mult;
*actual_period_ns =
(best_period_picosec * fudge_factor_80_to_20Mhz +
(pico_per_nano / 2)) / pico_per_nano;
return 0;
}
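/*
* Worked example: for the PXI 10 MHz backplane clock the reference period
* is 100 ns = 100000 ps and the target is 12500 ps (80 MHz). The search
* finds div = 1, mult = 8, since 100000 * 1 / 8 = 12500 ps exactly, and
* reports an actual_period_ns of (12500 * 4 + 500) / 1000 = 50 ns, i.e.
* the 20 MHz timebase derived from the PLL.
*/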
static inline unsigned num_configurable_rtsi_channels(struct comedi_device *dev)
{
if (boardtype.reg_type & ni_reg_m_series_mask)
return 8;
else
return 7;
}
static int ni_mseries_set_pll_master_clock(struct comedi_device *dev,
unsigned source, unsigned period_ns)
{
static const unsigned min_period_ns = 50;
static const unsigned max_period_ns = 1000;
static const unsigned timeout = 1000;
unsigned pll_control_bits;
unsigned freq_divider;
unsigned freq_multiplier;
unsigned i;
int retval;
if (source == NI_MIO_PLL_PXI10_CLOCK)
period_ns = 100;
/* these limits are somewhat arbitrary, but NI advertises 1 to 20MHz range so we'll use that */
if (period_ns < min_period_ns || period_ns > max_period_ns) {
printk
("%s: you must specify an input clock period between %i and %i nanoseconds "
"for the phase-locked loop.\n", __func__,
min_period_ns, max_period_ns);
return -EINVAL;
}
devpriv->rtsi_trig_direction_reg &= ~Use_RTSI_Clock_Bit;
devpriv->stc_writew(dev, devpriv->rtsi_trig_direction_reg,
RTSI_Trig_Direction_Register);
pll_control_bits =
MSeries_PLL_Enable_Bit | MSeries_PLL_VCO_Mode_75_150MHz_Bits;
devpriv->clock_and_fout2 |=
MSeries_Timebase1_Select_Bit | MSeries_Timebase3_Select_Bit;
devpriv->clock_and_fout2 &= ~MSeries_PLL_In_Source_Select_Mask;
switch (source) {
case NI_MIO_PLL_PXI_STAR_TRIGGER_CLOCK:
devpriv->clock_and_fout2 |=
MSeries_PLL_In_Source_Select_Star_Trigger_Bits;
retval = ni_mseries_get_pll_parameters(period_ns, &freq_divider,
&freq_multiplier,
&devpriv->clock_ns);
if (retval < 0)
return retval;
break;
case NI_MIO_PLL_PXI10_CLOCK:
/* pxi clock is 10MHz */
devpriv->clock_and_fout2 |=
MSeries_PLL_In_Source_Select_PXI_Clock10;
retval = ni_mseries_get_pll_parameters(period_ns, &freq_divider,
&freq_multiplier,
&devpriv->clock_ns);
if (retval < 0)
return retval;
break;
default:
{
unsigned rtsi_channel;
static const unsigned max_rtsi_channel = 7;
for (rtsi_channel = 0; rtsi_channel <= max_rtsi_channel;
++rtsi_channel) {
if (source ==
NI_MIO_PLL_RTSI_CLOCK(rtsi_channel)) {
devpriv->clock_and_fout2 |=
MSeries_PLL_In_Source_Select_RTSI_Bits
(rtsi_channel);
break;
}
}
if (rtsi_channel > max_rtsi_channel)
return -EINVAL;
retval = ni_mseries_get_pll_parameters(period_ns,
&freq_divider,
&freq_multiplier,
&devpriv->clock_ns);
if (retval < 0)
return retval;
}
break;
}
ni_writew(devpriv->clock_and_fout2, M_Offset_Clock_and_Fout2);
pll_control_bits |=
MSeries_PLL_Divisor_Bits(freq_divider) |
MSeries_PLL_Multiplier_Bits(freq_multiplier);
/* printk("using divider=%i, multiplier=%i for PLL. pll_control_bits = 0x%x\n",
* freq_divider, freq_multiplier, pll_control_bits); */
/* printk("clock_ns=%d\n", devpriv->clock_ns); */
ni_writew(pll_control_bits, M_Offset_PLL_Control);
devpriv->clock_source = source;
/* it seems to typically take a few hundred microseconds for PLL to lock */
for (i = 0; i < timeout; ++i) {
if (ni_readw(M_Offset_PLL_Status) & MSeries_PLL_Locked_Bit) {
break;
}
udelay(1);
}
if (i == timeout) {
printk
("%s: timed out waiting for PLL to lock to reference clock source %i with period %i ns.\n",
__func__, source, period_ns);
return -ETIMEDOUT;
}
return 3;
}
static int ni_set_master_clock(struct comedi_device *dev, unsigned source,
unsigned period_ns)
{
if (source == NI_MIO_INTERNAL_CLOCK) {
devpriv->rtsi_trig_direction_reg &= ~Use_RTSI_Clock_Bit;
devpriv->stc_writew(dev, devpriv->rtsi_trig_direction_reg,
RTSI_Trig_Direction_Register);
devpriv->clock_ns = TIMEBASE_1_NS;
if (boardtype.reg_type & ni_reg_m_series_mask) {
devpriv->clock_and_fout2 &=
~(MSeries_Timebase1_Select_Bit |
MSeries_Timebase3_Select_Bit);
ni_writew(devpriv->clock_and_fout2,
M_Offset_Clock_and_Fout2);
ni_writew(0, M_Offset_PLL_Control);
}
devpriv->clock_source = source;
} else {
if (boardtype.reg_type & ni_reg_m_series_mask) {
return ni_mseries_set_pll_master_clock(dev, source,
period_ns);
} else {
if (source == NI_MIO_RTSI_CLOCK) {
devpriv->rtsi_trig_direction_reg |=
Use_RTSI_Clock_Bit;
devpriv->stc_writew(dev,
devpriv->rtsi_trig_direction_reg,
RTSI_Trig_Direction_Register);
if (period_ns == 0) {
printk
("%s: we don't handle an unspecified clock period correctly yet, returning error.\n",
__func__);
return -EINVAL;
} else {
devpriv->clock_ns = period_ns;
}
devpriv->clock_source = source;
} else
return -EINVAL;
}
}
return 3;
}
static int ni_valid_rtsi_output_source(struct comedi_device *dev, unsigned chan,
unsigned source)
{
if (chan >= num_configurable_rtsi_channels(dev)) {
if (chan == old_RTSI_clock_channel) {
if (source == NI_RTSI_OUTPUT_RTSI_OSC)
return 1;
else {
printk
("%s: invalid source for channel=%i, channel %i is always the RTSI clock for pre-m-series boards.\n",
__func__, chan, old_RTSI_clock_channel);
return 0;
}
}
return 0;
}
switch (source) {
case NI_RTSI_OUTPUT_ADR_START1:
case NI_RTSI_OUTPUT_ADR_START2:
case NI_RTSI_OUTPUT_SCLKG:
case NI_RTSI_OUTPUT_DACUPDN:
case NI_RTSI_OUTPUT_DA_START1:
case NI_RTSI_OUTPUT_G_SRC0:
case NI_RTSI_OUTPUT_G_GATE0:
case NI_RTSI_OUTPUT_RGOUT0:
case NI_RTSI_OUTPUT_RTSI_BRD_0:
return 1;
case NI_RTSI_OUTPUT_RTSI_OSC:
if (boardtype.reg_type & ni_reg_m_series_mask)
return 1;
else
return 0;
default:
return 0;
}
}
static int ni_set_rtsi_routing(struct comedi_device *dev, unsigned chan,
unsigned source)
{
if (ni_valid_rtsi_output_source(dev, chan, source) == 0)
return -EINVAL;
if (chan < 4) {
devpriv->rtsi_trig_a_output_reg &= ~RTSI_Trig_Output_Mask(chan);
devpriv->rtsi_trig_a_output_reg |=
RTSI_Trig_Output_Bits(chan, source);
devpriv->stc_writew(dev, devpriv->rtsi_trig_a_output_reg,
RTSI_Trig_A_Output_Register);
} else if (chan < 8) {
devpriv->rtsi_trig_b_output_reg &= ~RTSI_Trig_Output_Mask(chan);
devpriv->rtsi_trig_b_output_reg |=
RTSI_Trig_Output_Bits(chan, source);
devpriv->stc_writew(dev, devpriv->rtsi_trig_b_output_reg,
RTSI_Trig_B_Output_Register);
}
return 2;
}
static unsigned ni_get_rtsi_routing(struct comedi_device *dev, unsigned chan)
{
if (chan < 4) {
return RTSI_Trig_Output_Source(chan,
devpriv->rtsi_trig_a_output_reg);
} else if (chan < num_configurable_rtsi_channels(dev)) {
return RTSI_Trig_Output_Source(chan,
devpriv->rtsi_trig_b_output_reg);
} else {
if (chan == old_RTSI_clock_channel)
return NI_RTSI_OUTPUT_RTSI_OSC;
printk("%s: bug! should never get here?\n", __func__);
return 0;
}
}
static int ni_rtsi_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
switch (data[0]) {
case INSN_CONFIG_DIO_OUTPUT:
if (chan < num_configurable_rtsi_channels(dev)) {
devpriv->rtsi_trig_direction_reg |=
RTSI_Output_Bit(chan,
(boardtype.
reg_type & ni_reg_m_series_mask) !=
0);
} else if (chan == old_RTSI_clock_channel) {
devpriv->rtsi_trig_direction_reg |=
Drive_RTSI_Clock_Bit;
}
devpriv->stc_writew(dev, devpriv->rtsi_trig_direction_reg,
RTSI_Trig_Direction_Register);
break;
case INSN_CONFIG_DIO_INPUT:
if (chan < num_configurable_rtsi_channels(dev)) {
devpriv->rtsi_trig_direction_reg &=
~RTSI_Output_Bit(chan,
(boardtype.
reg_type & ni_reg_m_series_mask)
!= 0);
} else if (chan == old_RTSI_clock_channel) {
devpriv->rtsi_trig_direction_reg &=
~Drive_RTSI_Clock_Bit;
}
devpriv->stc_writew(dev, devpriv->rtsi_trig_direction_reg,
RTSI_Trig_Direction_Register);
break;
case INSN_CONFIG_DIO_QUERY:
if (chan < num_configurable_rtsi_channels(dev)) {
data[1] =
(devpriv->rtsi_trig_direction_reg &
RTSI_Output_Bit(chan,
(boardtype.reg_type &
ni_reg_m_series_mask)
!= 0)) ? INSN_CONFIG_DIO_OUTPUT :
INSN_CONFIG_DIO_INPUT;
} else if (chan == old_RTSI_clock_channel) {
data[1] =
(devpriv->rtsi_trig_direction_reg &
Drive_RTSI_Clock_Bit)
? INSN_CONFIG_DIO_OUTPUT : INSN_CONFIG_DIO_INPUT;
}
return 2;
case INSN_CONFIG_SET_CLOCK_SRC:
return ni_set_master_clock(dev, data[1], data[2]);
case INSN_CONFIG_GET_CLOCK_SRC:
data[1] = devpriv->clock_source;
data[2] = devpriv->clock_ns;
return 3;
case INSN_CONFIG_SET_ROUTING:
return ni_set_rtsi_routing(dev, chan, data[1]);
case INSN_CONFIG_GET_ROUTING:
data[1] = ni_get_rtsi_routing(dev, chan);
return 2;
default:
return -EINVAL;
}
return 1;
}
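/*
 * For reference, a hedged sketch of how a caller might drive the
 * INSN_CONFIG dispatcher above: data[0] selects the operation and the
 * return value is the number of data elements used. The channel and
 * clock values below are purely illustrative:
 */
#if 0
static int example_set_rtsi_clock(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct comedi_insn insn;
unsigned int data[3];
memset(&insn, 0, sizeof(insn));
insn.n = 3;
insn.chanspec = CR_PACK(0, 0, 0); /* RTSI channel 0 */
data[0] = INSN_CONFIG_SET_CLOCK_SRC; /* operation selector */
data[1] = NI_MIO_INTERNAL_CLOCK; /* desired clock source */
data[2] = 0; /* clock period in ns (ignored for the internal clock) */
return ni_rtsi_insn_config(dev, s, &insn, data);
}
#endif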
static int cs5529_wait_for_idle(struct comedi_device *dev)
{
unsigned short status;
const int timeout = HZ;
int i;
for (i = 0; i < timeout; i++) {
status = ni_ao_win_inw(dev, CAL_ADC_Status_67xx);
if ((status & CSS_ADC_BUSY) == 0) {
break;
}
set_current_state(TASK_INTERRUPTIBLE);
if (schedule_timeout(1)) {
return -EIO;
}
}
/* printk("looped %i times waiting for idle\n", i); */
if (i == timeout) {
printk("%s: %s: timeout\n", __FILE__, __func__);
return -ETIME;
}
return 0;
}
static void cs5529_command(struct comedi_device *dev, unsigned short value)
{
static const int timeout = 100;
int i;
ni_ao_win_outw(dev, value, CAL_ADC_Command_67xx);
/* give time for command to start being serially clocked into cs5529.
* this ensures that the CSS_ADC_BUSY bit will get properly
* set before we exit this function.
*/
for (i = 0; i < timeout; i++) {
if ((ni_ao_win_inw(dev, CAL_ADC_Status_67xx) & CSS_ADC_BUSY))
break;
udelay(1);
}
/* printk("looped %i times writing command to cs5529\n", i); */
if (i == timeout) {
comedi_error(dev, "possible problem - never saw adc go busy?");
}
}
/* write to cs5529 register */
static void cs5529_config_write(struct comedi_device *dev, unsigned int value,
unsigned int reg_select_bits)
{
ni_ao_win_outw(dev, ((value >> 16) & 0xff),
CAL_ADC_Config_Data_High_Word_67xx);
ni_ao_win_outw(dev, (value & 0xffff),
CAL_ADC_Config_Data_Low_Word_67xx);
reg_select_bits &= CSCMD_REGISTER_SELECT_MASK;
cs5529_command(dev, CSCMD_COMMAND | reg_select_bits);
if (cs5529_wait_for_idle(dev))
comedi_error(dev, "time or signal in cs5529_config_write()");
}
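/*
 * Worked example of the split above: the cs5529 configuration
 * registers are 24 bits wide but the AO window is only 16 bits, so
 * value 0x123456 writes 0x12 ((value >> 16) & 0xff) to the high word
 * register and 0x3456 (value & 0xffff) to the low word register.
 */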
#ifdef NI_CS5529_DEBUG
/* read from cs5529 register */
static unsigned int cs5529_config_read(struct comedi_device *dev,
unsigned int reg_select_bits)
{
unsigned int value;
reg_select_bits &= CSCMD_REGISTER_SELECT_MASK;
cs5529_command(dev, CSCMD_COMMAND | CSCMD_READ | reg_select_bits);
if (cs5529_wait_for_idle(dev))
comedi_error(dev, "timeout or signal in cs5529_config_read()");
value = (ni_ao_win_inw(dev,
CAL_ADC_Config_Data_High_Word_67xx) << 16) &
0xff0000;
value |= ni_ao_win_inw(dev, CAL_ADC_Config_Data_Low_Word_67xx) & 0xffff;
return value;
}
#endif
static int cs5529_do_conversion(struct comedi_device *dev, unsigned short *data)
{
int retval;
unsigned short status;
cs5529_command(dev, CSCMD_COMMAND | CSCMD_SINGLE_CONVERSION);
retval = cs5529_wait_for_idle(dev);
if (retval) {
comedi_error(dev,
"timeout or signal in cs5529_do_conversion()");
return -ETIME;
}
status = ni_ao_win_inw(dev, CAL_ADC_Status_67xx);
if (status & CSS_OSC_DETECT) {
printk
("ni_mio_common: cs5529 conversion error, status CSS_OSC_DETECT\n");
return -EIO;
}
if (status & CSS_OVERRANGE) {
printk
("ni_mio_common: cs5529 conversion error, overrange (ignoring)\n");
}
if (data) {
*data = ni_ao_win_inw(dev, CAL_ADC_Data_67xx);
/* cs5529 returns 16 bit signed data in bipolar mode */
*data ^= (1 << 15);
}
return 0;
}
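/*
 * The XOR in cs5529_do_conversion() converts the chip's 16-bit
 * two's-complement bipolar result into the offset-binary form comedi
 * expects. A self-contained sketch of the mapping (the helper name is
 * hypothetical, not part of this driver):
 */
#if 0
static unsigned short twos_complement_to_offset_binary(short raw)
{
/* -32768 -> 0x0000, 0 -> 0x8000, 32767 -> 0xffff */
return (unsigned short)raw ^ 0x8000;
}
#endif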
static int cs5529_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int n, retval;
unsigned short sample;
unsigned int channel_select;
const unsigned int INTERNAL_REF = 0x1000;
/* Set calibration adc source. Docs lie, reference select bits 8 to 11
* do nothing. Bit 12 seems to choose the internal reference voltage, bit
* 13 causes the adc input to go overrange (maybe it reads the external reference?) */
if (insn->chanspec & CR_ALT_SOURCE)
channel_select = INTERNAL_REF;
else
channel_select = CR_CHAN(insn->chanspec);
ni_ao_win_outw(dev, channel_select, AO_Calibration_Channel_Select_67xx);
for (n = 0; n < insn->n; n++) {
retval = cs5529_do_conversion(dev, &sample);
if (retval < 0)
return retval;
data[n] = sample;
}
return insn->n;
}
static int init_cs5529(struct comedi_device *dev)
{
unsigned int config_bits =
CSCFG_PORT_MODE | CSCFG_WORD_RATE_2180_CYCLES;
#if 1
/* do self-calibration */
cs5529_config_write(dev, config_bits | CSCFG_SELF_CAL_OFFSET_GAIN,
CSCMD_CONFIG_REGISTER);
/* need to force a conversion for calibration to run */
cs5529_do_conversion(dev, NULL);
#else
/* force gain calibration to 1 */
cs5529_config_write(dev, 0x400000, CSCMD_GAIN_REGISTER);
cs5529_config_write(dev, config_bits | CSCFG_SELF_CAL_OFFSET,
CSCMD_CONFIG_REGISTER);
if (cs5529_wait_for_idle(dev))
comedi_error(dev, "timeout or signal in init_cs5529()\n");
#endif
#ifdef NI_CS5529_DEBUG
printk("config: 0x%x\n", cs5529_config_read(dev,
CSCMD_CONFIG_REGISTER));
printk("gain: 0x%x\n", cs5529_config_read(dev, CSCMD_GAIN_REGISTER));
printk("offset: 0x%x\n", cs5529_config_read(dev,
CSCMD_OFFSET_REGISTER));
#endif
return 0;
}
| gpl-2.0 |
bigzz/linux | drivers/net/ethernet/alteon/acenic.c | 1155 | 87054 | /*
* acenic.c: Linux driver for the Alteon AceNIC Gigabit Ethernet card
* and other Tigon based cards.
*
* Copyright 1998-2002 by Jes Sorensen, <jes@trained-monkey.org>.
*
* Thanks to Alteon and 3Com for providing hardware and documentation
* enabling me to write this driver.
*
* A mailing list for discussing the use of this driver has been
* setup, please subscribe to the lists if you have any questions
* about the driver. Send mail to linux-acenic-help@sunsite.auc.dk to
* see how to subscribe.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Additional credits:
* Pete Wyckoff <wyckoff@ca.sandia.gov>: Initial Linux/Alpha and trace
* dump support. The trace dump support has not been
* integrated yet however.
* Troy Benjegerdes: Big Endian (PPC) patches.
* Nate Stahl: Better out of memory handling and stats support.
* Aman Singla: Nasty race between interrupt handler and tx code dealing
* with 'testing the tx_ret_csm and setting tx_full'
* David S. Miller <davem@redhat.com>: conversion to new PCI dma mapping
* infrastructure and Sparc support
* Pierrick Pinasseau (CERN): For lending me an Ultra 5 to test the
* driver under Linux/Sparc64
* Matt Domsch <Matt_Domsch@dell.com>: Detect Alteon 1000baseT cards
* ETHTOOL_GDRVINFO support
* Chip Salzenberg <chip@valinux.com>: Fix race condition between tx
* handler and close() cleanup.
* Ken Aaker <kdaaker@rchland.vnet.ibm.com>: Correct check for whether
* memory mapped IO is enabled to
* make the driver work on RS/6000.
* Takayoshi Kouchi <kouchi@hpc.bs1.fc.nec.co.jp>: Identifying problem
* where the driver would disable
* bus master mode if it had to disable
* write and invalidate.
* Stephen Hack <stephen_hack@hp.com>: Fixed ace_set_mac_addr for little
* endian systems.
* Val Henson <vhenson@esscom.com>: Reset Jumbo skb producer and
* rx producer index when
* flushing the Jumbo ring.
* Hans Grobler <grobh@sun.ac.za>: Memory leak fixes in the
* driver init path.
* Grant Grundler <grundler@cup.hp.com>: PCI write posting fixes.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sockios.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#ifdef SIOCETHTOOL
#include <linux/ethtool.h>
#endif
#include <net/sock.h>
#include <net/ip.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#define DRV_NAME "acenic"
#undef INDEX_DEBUG
#ifdef CONFIG_ACENIC_OMIT_TIGON_I
#define ACE_IS_TIGON_I(ap) 0
#define ACE_TX_RING_ENTRIES(ap) MAX_TX_RING_ENTRIES
#else
#define ACE_IS_TIGON_I(ap) (ap->version == 1)
#define ACE_TX_RING_ENTRIES(ap) ap->tx_ring_entries
#endif
#ifndef PCI_VENDOR_ID_ALTEON
#define PCI_VENDOR_ID_ALTEON 0x12ae
#endif
#ifndef PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE
#define PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE 0x0001
#define PCI_DEVICE_ID_ALTEON_ACENIC_COPPER 0x0002
#endif
#ifndef PCI_DEVICE_ID_3COM_3C985
#define PCI_DEVICE_ID_3COM_3C985 0x0001
#endif
#ifndef PCI_VENDOR_ID_NETGEAR
#define PCI_VENDOR_ID_NETGEAR 0x1385
#define PCI_DEVICE_ID_NETGEAR_GA620 0x620a
#endif
#ifndef PCI_DEVICE_ID_NETGEAR_GA620T
#define PCI_DEVICE_ID_NETGEAR_GA620T 0x630a
#endif
/*
* Farallon used the DEC vendor ID by mistake and they seem not
* to care - stinky!
*/
#ifndef PCI_DEVICE_ID_FARALLON_PN9000SX
#define PCI_DEVICE_ID_FARALLON_PN9000SX 0x1a
#endif
#ifndef PCI_DEVICE_ID_FARALLON_PN9100T
#define PCI_DEVICE_ID_FARALLON_PN9100T 0xfa
#endif
#ifndef PCI_VENDOR_ID_SGI
#define PCI_VENDOR_ID_SGI 0x10a9
#endif
#ifndef PCI_DEVICE_ID_SGI_ACENIC
#define PCI_DEVICE_ID_SGI_ACENIC 0x0009
#endif
static const struct pci_device_id acenic_pci_tbl[] = {
{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C985,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620T,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
/*
* Farallon used the DEC vendor ID on their cards incorrectly,
* then later Alteon's ID.
*/
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_FARALLON_PN9000SX,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_FARALLON_PN9100T,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_ACENIC,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
{ }
};
MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
#define ace_sync_irq(irq) synchronize_irq(irq)
#ifndef offset_in_page
#define offset_in_page(ptr) ((unsigned long)(ptr) & ~PAGE_MASK)
#endif
#define ACE_MAX_MOD_PARMS 8
#define BOARD_IDX_STATIC 0
#define BOARD_IDX_OVERFLOW -1
#include "acenic.h"
/*
* These must be defined before the firmware is included.
*/
#define MAX_TEXT_LEN 96*1024
#define MAX_RODATA_LEN 8*1024
#define MAX_DATA_LEN 2*1024
#ifndef tigon2FwReleaseLocal
#define tigon2FwReleaseLocal 0
#endif
/*
* This driver currently supports Tigon I and Tigon II based cards
* including the Alteon AceNIC, the 3Com 3C985[B] and NetGear
* GA620. The driver should also work on the SGI, DEC and Farallon
* versions of the card, however I have not been able to test that
* myself.
*
* This card is really neat, it supports receive hardware checksumming
* and jumbo frames (up to 9000 bytes) and does a lot of work in the
* firmware. Also the programming interface is quite neat, except for
* the parts dealing with the i2c eeprom on the card ;-)
*
* Using jumbo frames:
*
* To enable jumbo frames, simply specify an mtu between 1500 and 9000
* bytes to ifconfig. Jumbo frames can be enabled or disabled at any time
* by running `ifconfig eth<X> mtu <MTU>' with <X> being the Ethernet
* interface number and <MTU> being the MTU value.
*
* Module parameters:
*
* When compiled as a loadable module, the driver allows for a number
* of module parameters to be specified. The driver supports the
* following module parameters:
*
* trace=<val> - Firmware trace level. This requires special traced
* firmware to replace the firmware supplied with
* the driver - for debugging purposes only.
*
* link=<val> - Link state. Normally you want to use the default link
* parameters set by the driver. This can be used to
* override these in case your switch doesn't negotiate
* the link properly. Valid values are:
* 0x0001 - Force half duplex link.
* 0x0002 - Do not negotiate line speed with the other end.
* 0x0010 - 10Mbit/sec link.
* 0x0020 - 100Mbit/sec link.
* 0x0040 - 1000Mbit/sec link.
* 0x0100 - Do not negotiate flow control.
* 0x0200 - Enable RX flow control Y
* 0x0400 - Enable TX flow control Y (Tigon II NICs only).
* Default value is 0x0270, i.e. enable link and flow
* control negotiation, negotiating the highest
* possible link speed with RX flow control enabled.
*
* When disabling link speed negotiation, only one link
* speed is allowed to be specified!
*
* tx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
* to wait for more packets to arrive before
* interrupting the host, from the time the first
* packet arrives.
*
* rx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
* to wait for more packets to arrive in the receive ring,
* before interrupting the host, after receiving the
* first packet in the ring.
*
* max_tx_desc=<val> - maximum number of transmit descriptors
* (packets) transmitted before interrupting the host.
*
* max_rx_desc=<val> - maximum number of receive descriptors
* (packets) received before interrupting the host.
*
* tx_ratio=<val> - 6 bit value (0 - 63) specifying the split in 64th
* increments of the NIC's on board memory to be used for
* transmit and receive buffers. For the 1MB NIC approx. 800KB
* is available, on the 1/2MB NIC approx. 300KB is available.
* 68KB will always be available as a minimum for both
* directions. The default value is a 50/50 split.
* dis_pci_mem_inval=<val> - disable PCI memory write and invalidate
* operations, default (1) is to always disable this as
* that is what Alteon does on NT. I have not been able
* to measure any real performance differences with
* this on my systems. Set <val>=0 if you want to
* enable these operations.
*
* If you use more than one NIC, specify the parameters for the
* individual NICs with a comma, e.g. trace=0,0x00001fff,0 if you want
* to run tracing on NIC #2 but not on NICs #1 and #3.
*
* TODO:
*
* - Proper multicast support.
* - NIC dump support.
* - More tuning parameters.
*
* The mini ring is not used under Linux and I am not sure it makes sense
* to actually use it.
*
* New interrupt handler strategy:
*
* The old interrupt handler worked using the traditional method of
* replacing an skbuff with a new one when a packet arrives. However
* the rx rings do not need to contain a static number of buffer
* descriptors, thus it makes sense to move the memory allocation out
* of the main interrupt handler and do it in a bottom half handler
* and only allocate new buffers when the number of buffers in the
* ring is below a certain threshold. In order to avoid starving the
* NIC under heavy load it is however necessary to force allocation
* when hitting a minimum threshold. The strategy for allocation is as
* follows:
*
* RX_LOW_BUF_THRES - allocate buffers in the bottom half
* RX_PANIC_LOW_THRES - we are very low on buffers, allocate
* the buffers in the interrupt handler
* RX_RING_THRES - maximum number of buffers in the rx ring
* RX_MINI_THRES - maximum number of buffers in the mini ring
* RX_JUMBO_THRES - maximum number of buffers in the jumbo ring
*
* One advantageous side effect of this allocation approach is that the
* entire rx processing can be done without holding any spin lock
* since the rx rings and registers are totally independent of the tx
* ring and its registers. This of course includes the kmalloc's of
* new skb's. Thus start_xmit can run in parallel with rx processing
* and the memory allocation on SMP systems.
*
* Note that running the skb reallocation in a bottom half opens up
* another can of races which needs to be handled properly. In
* particular it can happen that the interrupt handler tries to run
* the reallocation while the bottom half is either running on another
* CPU or was interrupted on the same CPU. To get around this the
* driver uses bitops to prevent the reallocation routines from being
* reentered.
*
* TX handling can also be done without holding any spin lock, wheee
* this is fun! since tx_ret_csm is only written to by the interrupt
* handler. The case to be aware of is when shutting down the device
* and cleaning up where it is necessary to make sure that
* start_xmit() is not running while this is happening. Well DaveM
* informs me that this case is already protected against ... bye bye
* Mr. Spin Lock, it was nice to know you.
*
* TX interrupts are now partly disabled so the NIC will only generate
* TX interrupts for the number of coal ticks, not for the number of
* TX packets in the queue. This should reduce the number of TX only,
* ie. when no RX processing is done, interrupts seen.
*/
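/*
 * A hypothetical example of the module parameters described above:
 * loading the driver with
 *
 * modprobe acenic link=0x0242,0x0270 trace=0,0x1fff
 *
 * would force NIC #1 to a non-negotiated 1000Mbit link with RX flow
 * control (0x0002 | 0x0040 | 0x0200 = 0x0242) while NIC #2 keeps the
 * default link settings (0x0270) and gets firmware tracing enabled.
 * The values are illustrative only.
 */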
/*
* Threshold values for RX buffer allocation - the low water marks for
* when to start refilling the rings are set to 75% of the ring
* sizes. It seems to make sense to refill the rings entirely from the
* interrupt handler once it gets below the panic threshold, that way
* we don't risk that the refilling is moved to another CPU when the
* one running the interrupt handler just got the slab code hot in its
* cache.
*/
#define RX_RING_SIZE 72
#define RX_MINI_SIZE 64
#define RX_JUMBO_SIZE 48
#define RX_PANIC_STD_THRES 16
#define RX_PANIC_STD_REFILL (3*RX_PANIC_STD_THRES)/2
#define RX_LOW_STD_THRES (3*RX_RING_SIZE)/4
#define RX_PANIC_MINI_THRES 12
#define RX_PANIC_MINI_REFILL (3*RX_PANIC_MINI_THRES)/2
#define RX_LOW_MINI_THRES (3*RX_MINI_SIZE)/4
#define RX_PANIC_JUMBO_THRES 6
#define RX_PANIC_JUMBO_REFILL (3*RX_PANIC_JUMBO_THRES)/2
#define RX_LOW_JUMBO_THRES (3*RX_JUMBO_SIZE)/4
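/*
 * Worked out with the ring sizes above, the low water marks evaluate
 * to RX_LOW_STD_THRES = 3*72/4 = 54, RX_LOW_MINI_THRES = 3*64/4 = 48
 * and RX_LOW_JUMBO_THRES = 3*48/4 = 36, i.e. refilling starts once a
 * ring drops below 75% full, matching the comment above.
 */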
/*
* Size of the mini ring entries, basically these just should be big
* enough to take TCP ACKs
*/
#define ACE_MINI_SIZE 100
#define ACE_MINI_BUFSIZE ACE_MINI_SIZE
#define ACE_STD_BUFSIZE (ACE_STD_MTU + ETH_HLEN + 4)
#define ACE_JUMBO_BUFSIZE (ACE_JUMBO_MTU + ETH_HLEN + 4)
/*
* There seems to be a magic difference in the effect between 995 and 996
* but little difference between 900 and 995 ... no idea why.
*
* There is now a default set of tuning parameters which is set, depending
* on whether or not the user enables Jumbo frames. It's assumed that if
* Jumbo frames are enabled, the user wants optimal tuning for that case.
*/
#define DEF_TX_COAL 400 /* 996 */
#define DEF_TX_MAX_DESC 60 /* was 40 */
#define DEF_RX_COAL 120 /* 1000 */
#define DEF_RX_MAX_DESC 25
#define DEF_TX_RATIO 21 /* 24 */
#define DEF_JUMBO_TX_COAL 20
#define DEF_JUMBO_TX_MAX_DESC 60
#define DEF_JUMBO_RX_COAL 30
#define DEF_JUMBO_RX_MAX_DESC 6
#define DEF_JUMBO_TX_RATIO 21
#if tigon2FwReleaseLocal < 20001118
/*
* Standard firmware and early modifications duplicate
* IRQ load without this flag (coal timer is never reset).
* Note that with this flag tx_coal should be less than
* time to xmit full tx ring.
* 400usec is not so bad for tx ring size of 128.
*/
#define TX_COAL_INTS_ONLY 1 /* worth it */
#else
/*
* With modified firmware, this is not necessary, but still useful.
*/
#define TX_COAL_INTS_ONLY 1
#endif
#define DEF_TRACE 0
#define DEF_STAT (2 * TICKS_PER_SEC)
static int link_state[ACE_MAX_MOD_PARMS];
static int trace[ACE_MAX_MOD_PARMS];
static int tx_coal_tick[ACE_MAX_MOD_PARMS];
static int rx_coal_tick[ACE_MAX_MOD_PARMS];
static int max_tx_desc[ACE_MAX_MOD_PARMS];
static int max_rx_desc[ACE_MAX_MOD_PARMS];
static int tx_ratio[ACE_MAX_MOD_PARMS];
static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};
MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
#ifndef CONFIG_ACENIC_OMIT_TIGON_I
MODULE_FIRMWARE("acenic/tg1.bin");
#endif
MODULE_FIRMWARE("acenic/tg2.bin");
module_param_array_named(link, link_state, int, NULL, 0);
module_param_array(trace, int, NULL, 0);
module_param_array(tx_coal_tick, int, NULL, 0);
module_param_array(max_tx_desc, int, NULL, 0);
module_param_array(rx_coal_tick, int, NULL, 0);
module_param_array(max_rx_desc, int, NULL, 0);
module_param_array(tx_ratio, int, NULL, 0);
MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait after the first tx descriptor arrives");
MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait for");
MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait after the first rx descriptor arrives");
MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait for");
MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");
static const char version[] =
"acenic.c: v0.92 08/05/2002 Jes Sorensen, linux-acenic@SunSITE.dk\n"
" http://home.cern.ch/~jes/gige/acenic.html\n";
static int ace_get_settings(struct net_device *, struct ethtool_cmd *);
static int ace_set_settings(struct net_device *, struct ethtool_cmd *);
static void ace_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
static const struct ethtool_ops ace_ethtool_ops = {
.get_settings = ace_get_settings,
.set_settings = ace_set_settings,
.get_drvinfo = ace_get_drvinfo,
};
static void ace_watchdog(struct net_device *dev);
static const struct net_device_ops ace_netdev_ops = {
.ndo_open = ace_open,
.ndo_stop = ace_close,
.ndo_tx_timeout = ace_watchdog,
.ndo_get_stats = ace_get_stats,
.ndo_start_xmit = ace_start_xmit,
.ndo_set_rx_mode = ace_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ace_set_mac_addr,
.ndo_change_mtu = ace_change_mtu,
};
static int acenic_probe_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct net_device *dev;
struct ace_private *ap;
static int boards_found;
dev = alloc_etherdev(sizeof(struct ace_private));
if (dev == NULL)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
ap = netdev_priv(dev);
ap->pdev = pdev;
ap->name = pci_name(pdev);
dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->watchdog_timeo = 5*HZ;
dev->netdev_ops = &ace_netdev_ops;
dev->ethtool_ops = &ace_ethtool_ops;
/* we only display this string ONCE */
if (!boards_found)
printk(version);
if (pci_enable_device(pdev))
goto fail_free_netdev;
/*
* Enable master mode before we start playing with the
* pci_command word since pci_set_master() will modify
* it.
*/
pci_set_master(pdev);
pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command);
/* OpenFirmware on Macs does not set this - DOH.. */
if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
printk(KERN_INFO "%s: Enabling PCI Memory Mapped "
"access - was not enabled by BIOS/Firmware\n",
ap->name);
ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY;
pci_write_config_word(ap->pdev, PCI_COMMAND,
ap->pci_command);
wmb();
}
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency);
if (ap->pci_latency <= 0x40) {
ap->pci_latency = 0x40;
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency);
}
/*
* Remap the regs into kernel space - this is abuse of
* dev->base_addr since it was meant for I/O port
* addresses but who gives a damn.
*/
dev->base_addr = pci_resource_start(pdev, 0);
ap->regs = ioremap(dev->base_addr, 0x4000);
if (!ap->regs) {
printk(KERN_ERR "%s: Unable to map I/O register, "
"AceNIC %i will be disabled.\n",
ap->name, boards_found);
goto fail_free_netdev;
}
switch(pdev->vendor) {
case PCI_VENDOR_ID_ALTEON:
if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) {
printk(KERN_INFO "%s: Farallon PN9100-T ",
ap->name);
} else {
printk(KERN_INFO "%s: Alteon AceNIC ",
ap->name);
}
break;
case PCI_VENDOR_ID_3COM:
printk(KERN_INFO "%s: 3Com 3C985 ", ap->name);
break;
case PCI_VENDOR_ID_NETGEAR:
printk(KERN_INFO "%s: NetGear GA620 ", ap->name);
break;
case PCI_VENDOR_ID_DEC:
if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) {
printk(KERN_INFO "%s: Farallon PN9000-SX ",
ap->name);
break;
}
/* fall through */
case PCI_VENDOR_ID_SGI:
printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
break;
default:
printk(KERN_INFO "%s: Unknown AceNIC ", ap->name);
break;
}
printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
printk("irq %d\n", pdev->irq);
#ifdef CONFIG_ACENIC_OMIT_TIGON_I
if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
printk(KERN_ERR "%s: Driver compiled without Tigon I"
" support - NIC disabled\n", dev->name);
goto fail_uninit;
}
#endif
if (ace_allocate_descriptors(dev))
goto fail_free_netdev;
#ifdef MODULE
if (boards_found >= ACE_MAX_MOD_PARMS)
ap->board_idx = BOARD_IDX_OVERFLOW;
else
ap->board_idx = boards_found;
#else
ap->board_idx = BOARD_IDX_STATIC;
#endif
if (ace_init(dev))
goto fail_free_netdev;
if (register_netdev(dev)) {
printk(KERN_ERR "acenic: device registration failed\n");
goto fail_uninit;
}
ap->name = dev->name;
if (ap->pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
pci_set_drvdata(pdev, dev);
boards_found++;
return 0;
fail_uninit:
ace_init_cleanup(dev);
fail_free_netdev:
free_netdev(dev);
return -ENODEV;
}
static void acenic_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
short i;
unregister_netdev(dev);
writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
if (ap->version >= 2)
writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
/*
* This clears any pending interrupts
*/
writel(1, &regs->Mb0Lo);
readl(&regs->CpuCtrl); /* flush */
/*
* Make sure no other CPUs are processing interrupts
* on the card before the buffers are being released.
* Otherwise one might experience some `interesting'
* effects.
*
* Then release the RX buffers - jumbo buffers were
* already released in ace_close().
*/
ace_sync_irq(dev->irq);
for (i = 0; i < RX_STD_RING_ENTRIES; i++) {
struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;
if (skb) {
struct ring_info *ringp;
dma_addr_t mapping;
ringp = &ap->skb->rx_std_skbuff[i];
mapping = dma_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_STD_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->rx_std_ring[i].size = 0;
ap->skb->rx_std_skbuff[i].skb = NULL;
dev_kfree_skb(skb);
}
}
if (ap->version >= 2) {
for (i = 0; i < RX_MINI_RING_ENTRIES; i++) {
struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;
if (skb) {
struct ring_info *ringp;
dma_addr_t mapping;
ringp = &ap->skb->rx_mini_skbuff[i];
mapping = dma_unmap_addr(ringp,mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_MINI_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->rx_mini_ring[i].size = 0;
ap->skb->rx_mini_skbuff[i].skb = NULL;
dev_kfree_skb(skb);
}
}
}
for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
if (skb) {
struct ring_info *ringp;
dma_addr_t mapping;
ringp = &ap->skb->rx_jumbo_skbuff[i];
mapping = dma_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_JUMBO_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->rx_jumbo_ring[i].size = 0;
ap->skb->rx_jumbo_skbuff[i].skb = NULL;
dev_kfree_skb(skb);
}
}
ace_init_cleanup(dev);
free_netdev(dev);
}
static struct pci_driver acenic_pci_driver = {
.name = "acenic",
.id_table = acenic_pci_tbl,
.probe = acenic_probe_one,
.remove = acenic_remove_one,
};
static void ace_free_descriptors(struct net_device *dev)
{
struct ace_private *ap = netdev_priv(dev);
int size;
if (ap->rx_std_ring != NULL) {
size = (sizeof(struct rx_desc) *
(RX_STD_RING_ENTRIES +
RX_JUMBO_RING_ENTRIES +
RX_MINI_RING_ENTRIES +
RX_RETURN_RING_ENTRIES));
pci_free_consistent(ap->pdev, size, ap->rx_std_ring,
ap->rx_ring_base_dma);
ap->rx_std_ring = NULL;
ap->rx_jumbo_ring = NULL;
ap->rx_mini_ring = NULL;
ap->rx_return_ring = NULL;
}
if (ap->evt_ring != NULL) {
size = (sizeof(struct event) * EVT_RING_ENTRIES);
pci_free_consistent(ap->pdev, size, ap->evt_ring,
ap->evt_ring_dma);
ap->evt_ring = NULL;
}
if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) {
size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
pci_free_consistent(ap->pdev, size, ap->tx_ring,
ap->tx_ring_dma);
}
ap->tx_ring = NULL;
if (ap->evt_prd != NULL) {
pci_free_consistent(ap->pdev, sizeof(u32),
(void *)ap->evt_prd, ap->evt_prd_dma);
ap->evt_prd = NULL;
}
if (ap->rx_ret_prd != NULL) {
pci_free_consistent(ap->pdev, sizeof(u32),
(void *)ap->rx_ret_prd,
ap->rx_ret_prd_dma);
ap->rx_ret_prd = NULL;
}
if (ap->tx_csm != NULL) {
pci_free_consistent(ap->pdev, sizeof(u32),
(void *)ap->tx_csm, ap->tx_csm_dma);
ap->tx_csm = NULL;
}
}
static int ace_allocate_descriptors(struct net_device *dev)
{
struct ace_private *ap = netdev_priv(dev);
int size;
size = (sizeof(struct rx_desc) *
(RX_STD_RING_ENTRIES +
RX_JUMBO_RING_ENTRIES +
RX_MINI_RING_ENTRIES +
RX_RETURN_RING_ENTRIES));
ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size,
&ap->rx_ring_base_dma);
if (ap->rx_std_ring == NULL)
goto fail;
ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES;
ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES;
ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES;
size = (sizeof(struct event) * EVT_RING_ENTRIES);
ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma);
if (ap->evt_ring == NULL)
goto fail;
/*
* Only allocate a host TX ring for the Tigon II, the Tigon I
* has to use PCI registers for this ;-(
*/
if (!ACE_IS_TIGON_I(ap)) {
size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
ap->tx_ring = pci_alloc_consistent(ap->pdev, size,
&ap->tx_ring_dma);
if (ap->tx_ring == NULL)
goto fail;
}
ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
&ap->evt_prd_dma);
if (ap->evt_prd == NULL)
goto fail;
ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
&ap->rx_ret_prd_dma);
if (ap->rx_ret_prd == NULL)
goto fail;
ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32),
&ap->tx_csm_dma);
if (ap->tx_csm == NULL)
goto fail;
return 0;
fail:
/* Clean up. */
ace_init_cleanup(dev);
return 1;
}
/*
* Generic cleanup handling data allocated during init. Used when the
* module is unloaded or if an error occurs during initialization
*/
static void ace_init_cleanup(struct net_device *dev)
{
struct ace_private *ap;
ap = netdev_priv(dev);
ace_free_descriptors(dev);
if (ap->info)
pci_free_consistent(ap->pdev, sizeof(struct ace_info),
ap->info, ap->info_dma);
kfree(ap->skb);
kfree(ap->trace_buf);
if (dev->irq)
free_irq(dev->irq, dev);
iounmap(ap->regs);
}
/*
* Commands are considered to be slow.
*/
static inline void ace_issue_cmd(struct ace_regs __iomem *regs, struct cmd *cmd)
{
u32 idx;
idx = readl(&regs->CmdPrd);
writel(*(u32 *)(cmd), &regs->CmdRng[idx]);
idx = (idx + 1) % CMD_RING_ENTRIES;
writel(idx, &regs->CmdPrd);
}
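/*
 * The command ring written above is a plain circular buffer: the
 * producer index wraps via the modulo, e.g. with a ring of 64
 * entries, (63 + 1) % 64 == 0. The firmware consumes entries at its
 * own pace, which is why commands are "considered to be slow".
 */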
static int ace_init(struct net_device *dev)
{
struct ace_private *ap;
struct ace_regs __iomem *regs;
struct ace_info *info = NULL;
struct pci_dev *pdev;
unsigned long myjif;
u64 tmp_ptr;
u32 tig_ver, mac1, mac2, tmp, pci_state;
int board_idx, ecode = 0;
short i;
unsigned char cache_size;
ap = netdev_priv(dev);
regs = ap->regs;
board_idx = ap->board_idx;
/*
* aman@sgi.com - its useful to do a NIC reset here to
* address the `Firmware not running' problem subsequent
* to any crashes involving the NIC
*/
writel(HW_RESET | (HW_RESET << 24), &regs->HostCtrl);
readl(&regs->HostCtrl); /* PCI write posting */
udelay(5);
/*
* Don't access any other registers before this point!
*/
#ifdef __BIG_ENDIAN
/*
* This will most likely need BYTE_SWAP once we switch
* to using __raw_writel()
*/
writel((WORD_SWAP | CLR_INT | ((WORD_SWAP | CLR_INT) << 24)),
&regs->HostCtrl);
#else
writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)),
&regs->HostCtrl);
#endif
readl(&regs->HostCtrl); /* PCI write posting */
/*
* Stop the NIC CPU and clear pending interrupts
*/
writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
readl(&regs->CpuCtrl); /* PCI write posting */
writel(0, &regs->Mb0Lo);
tig_ver = readl(&regs->HostCtrl) >> 28;
switch(tig_ver){
#ifndef CONFIG_ACENIC_OMIT_TIGON_I
case 4:
case 5:
printk(KERN_INFO " Tigon I (Rev. %i), Firmware: %i.%i.%i, ",
tig_ver, ap->firmware_major, ap->firmware_minor,
ap->firmware_fix);
writel(0, &regs->LocalCtrl);
ap->version = 1;
ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES;
break;
#endif
case 6:
printk(KERN_INFO " Tigon II (Rev. %i), Firmware: %i.%i.%i, ",
tig_ver, ap->firmware_major, ap->firmware_minor,
ap->firmware_fix);
writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
readl(&regs->CpuBCtrl); /* PCI write posting */
/*
* The SRAM bank size does _not_ indicate the amount
* of memory on the card, it controls the _bank_ size!
* Ie. a 1MB AceNIC will have two banks of 512KB.
*/
writel(SRAM_BANK_512K, &regs->LocalCtrl);
writel(SYNC_SRAM_TIMING, &regs->MiscCfg);
ap->version = 2;
ap->tx_ring_entries = MAX_TX_RING_ENTRIES;
break;
default:
printk(KERN_WARNING " Unsupported Tigon version detected "
"(%i)\n", tig_ver);
ecode = -ENODEV;
goto init_error;
}
/*
* ModeStat _must_ be set after the SRAM settings as this change
* seems to corrupt the ModeStat and possibly other registers.
* The SRAM settings survive resets and setting it to the same
* value a second time works as well. This is what caused the
* `Firmware not running' problem on the Tigon II.
*/
#ifdef __BIG_ENDIAN
writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD |
ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#else
writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL |
ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#endif
readl(&regs->ModeStat); /* PCI write posting */
mac1 = 0;
for(i = 0; i < 4; i++) {
int t;
mac1 = mac1 << 8;
t = read_eeprom_byte(dev, 0x8c+i);
if (t < 0) {
ecode = -EIO;
goto init_error;
} else
mac1 |= (t & 0xff);
}
mac2 = 0;
for(i = 4; i < 8; i++) {
int t;
mac2 = mac2 << 8;
t = read_eeprom_byte(dev, 0x8c+i);
if (t < 0) {
ecode = -EIO;
goto init_error;
} else
mac2 |= (t & 0xff);
}
writel(mac1, &regs->MacAddrHi);
writel(mac2, &regs->MacAddrLo);
dev->dev_addr[0] = (mac1 >> 8) & 0xff;
dev->dev_addr[1] = mac1 & 0xff;
dev->dev_addr[2] = (mac2 >> 24) & 0xff;
dev->dev_addr[3] = (mac2 >> 16) & 0xff;
dev->dev_addr[4] = (mac2 >> 8) & 0xff;
dev->dev_addr[5] = mac2 & 0xff;
printk("MAC: %pM\n", dev->dev_addr);
/*
* Looks like this is necessary to deal with on all architectures,
* even this %$#%$# N440BX Intel based thing doesn't get it right.
* Ie. having two NICs in the machine, one will have the cache
* line set at boot time, the other will not.
*/
pdev = ap->pdev;
pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_size);
cache_size <<= 2;
if (cache_size != SMP_CACHE_BYTES) {
printk(KERN_INFO " PCI cache line size set incorrectly "
"(%i bytes) by BIOS/FW, ", cache_size);
if (cache_size > SMP_CACHE_BYTES)
printk("expecting %i\n", SMP_CACHE_BYTES);
else {
printk("correcting to %i\n", SMP_CACHE_BYTES);
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
SMP_CACHE_BYTES >> 2);
}
}
pci_state = readl(&regs->PciState);
printk(KERN_INFO " PCI bus width: %i bits, speed: %iMHz, "
"latency: %i clks\n",
(pci_state & PCI_32BIT) ? 32 : 64,
(pci_state & PCI_66MHZ) ? 66 : 33,
ap->pci_latency);
/*
* Set the max DMA transfer size. Seems that for most systems
* the performance is better when no MAX parameter is
* set. However for systems enabling PCI write and invalidate,
* DMA writes must be set to the L1 cache line size to get
* optimal performance.
*
* The default is now to turn the PCI write and invalidate off
* - that is what Alteon does for NT.
*/
tmp = READ_CMD_MEM | WRITE_CMD_MEM;
if (ap->version >= 2) {
tmp |= (MEM_READ_MULTIPLE | (pci_state & PCI_66MHZ));
/*
* Tuning parameters only supported for 8 cards
*/
if (board_idx == BOARD_IDX_OVERFLOW ||
dis_pci_mem_inval[board_idx]) {
if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
pci_write_config_word(pdev, PCI_COMMAND,
ap->pci_command);
printk(KERN_INFO " Disabling PCI memory "
"write and invalidate\n");
}
} else if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
printk(KERN_INFO " PCI memory write & invalidate "
"enabled by BIOS, enabling counter measures\n");
switch(SMP_CACHE_BYTES) {
case 16:
tmp |= DMA_WRITE_MAX_16;
break;
case 32:
tmp |= DMA_WRITE_MAX_32;
break;
case 64:
tmp |= DMA_WRITE_MAX_64;
break;
case 128:
tmp |= DMA_WRITE_MAX_128;
break;
default:
printk(KERN_INFO " Cache line size %i not "
"supported, PCI write and invalidate "
"disabled\n", SMP_CACHE_BYTES);
ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
pci_write_config_word(pdev, PCI_COMMAND,
ap->pci_command);
}
}
}
#ifdef __sparc__
/*
* On this platform, we know what the best dma settings
* are. We use 64-byte maximum bursts, because if we
* burst larger than the cache line size (or even cross
* a 64byte boundary in a single burst) the UltraSparc
* PCI controller will disconnect at 64-byte multiples.
*
* Read-multiple will be properly enabled above, and when
* set will give the PCI controller proper hints about
* prefetching.
*/
tmp &= ~DMA_READ_WRITE_MASK;
tmp |= DMA_READ_MAX_64;
tmp |= DMA_WRITE_MAX_64;
#endif
#ifdef __alpha__
tmp &= ~DMA_READ_WRITE_MASK;
tmp |= DMA_READ_MAX_128;
/*
* All the docs say MUST NOT. Well, I did.
* Nothing terrible happens, if we load wrong size.
* Bit w&i still works better!
*/
tmp |= DMA_WRITE_MAX_128;
#endif
writel(tmp, &regs->PciState);
#if 0
/*
* The Host PCI bus controller driver has to set FBB.
* If all devices on that PCI bus support FBB, then the controller
* can enable FBB support in the Host PCI Bus controller (or on
* the PCI-PCI bridge if that applies).
* -ggg
*/
/*
* I have received reports from people having problems when this
* bit is enabled.
*/
if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) {
printk(KERN_INFO " Enabling PCI Fast Back to Back\n");
ap->pci_command |= PCI_COMMAND_FAST_BACK;
pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command);
}
#endif
/*
* Configure DMA attributes.
*/
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
ap->pci_using_dac = 1;
} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
ap->pci_using_dac = 0;
} else {
ecode = -ENODEV;
goto init_error;
}
/*
* Initialize the generic info block and the command+event rings
* and the control blocks for the transmit and receive rings
* as they need to be setup once and for all.
*/
if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info),
&ap->info_dma))) {
ecode = -EAGAIN;
goto init_error;
}
ap->info = info;
/*
* Get the memory for the skb rings.
*/
if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
ecode = -EAGAIN;
goto init_error;
}
ecode = request_irq(pdev->irq, ace_interrupt, IRQF_SHARED,
DRV_NAME, dev);
if (ecode) {
printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
DRV_NAME, pdev->irq);
goto init_error;
} else
dev->irq = pdev->irq;
#ifdef INDEX_DEBUG
spin_lock_init(&ap->debug_lock);
ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1;
ap->last_std_rx = 0;
ap->last_mini_rx = 0;
#endif
memset(ap->info, 0, sizeof(struct ace_info));
memset(ap->skb, 0, sizeof(struct ace_skb));
ecode = ace_load_firmware(dev);
if (ecode)
goto init_error;
ap->fw_running = 0;
tmp_ptr = ap->info_dma;
writel(tmp_ptr >> 32, &regs->InfoPtrHi);
writel(tmp_ptr & 0xffffffff, &regs->InfoPtrLo);
memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));
set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma);
info->evt_ctrl.flags = 0;
*(ap->evt_prd) = 0;
wmb();
set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma);
writel(0, &regs->EvtCsm);
set_aceaddr(&info->cmd_ctrl.rngptr, 0x100);
info->cmd_ctrl.flags = 0;
info->cmd_ctrl.max_len = 0;
for (i = 0; i < CMD_RING_ENTRIES; i++)
writel(0, &regs->CmdRng[i]);
writel(0, &regs->CmdPrd);
writel(0, &regs->CmdCsm);
tmp_ptr = ap->info_dma;
tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats);
set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);
set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE;
info->rx_std_ctrl.flags =
RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
memset(ap->rx_std_ring, 0,
RX_STD_RING_ENTRIES * sizeof(struct rx_desc));
for (i = 0; i < RX_STD_RING_ENTRIES; i++)
ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM;
ap->rx_std_skbprd = 0;
atomic_set(&ap->cur_rx_bufs, 0);
set_aceaddr(&info->rx_jumbo_ctrl.rngptr,
(ap->rx_ring_base_dma +
(sizeof(struct rx_desc) * RX_STD_RING_ENTRIES)));
info->rx_jumbo_ctrl.max_len = 0;
info->rx_jumbo_ctrl.flags =
RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
memset(ap->rx_jumbo_ring, 0,
RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc));
for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++)
ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO;
ap->rx_jumbo_skbprd = 0;
atomic_set(&ap->cur_jumbo_bufs, 0);
memset(ap->rx_mini_ring, 0,
RX_MINI_RING_ENTRIES * sizeof(struct rx_desc));
if (ap->version >= 2) {
set_aceaddr(&info->rx_mini_ctrl.rngptr,
(ap->rx_ring_base_dma +
(sizeof(struct rx_desc) *
(RX_STD_RING_ENTRIES +
RX_JUMBO_RING_ENTRIES))));
info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
info->rx_mini_ctrl.flags =
RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|RCB_FLG_VLAN_ASSIST;
for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
ap->rx_mini_ring[i].flags =
BD_FLG_TCP_UDP_SUM | BD_FLG_MINI;
} else {
set_aceaddr(&info->rx_mini_ctrl.rngptr, 0);
info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE;
info->rx_mini_ctrl.max_len = 0;
}
ap->rx_mini_skbprd = 0;
atomic_set(&ap->cur_mini_bufs, 0);
set_aceaddr(&info->rx_return_ctrl.rngptr,
(ap->rx_ring_base_dma +
(sizeof(struct rx_desc) *
(RX_STD_RING_ENTRIES +
RX_JUMBO_RING_ENTRIES +
RX_MINI_RING_ENTRIES))));
info->rx_return_ctrl.flags = 0;
info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES;
memset(ap->rx_return_ring, 0,
RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc));
set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma);
*(ap->rx_ret_prd) = 0;
writel(TX_RING_BASE, &regs->WinBase);
if (ACE_IS_TIGON_I(ap)) {
ap->tx_ring = (__force struct tx_desc *) regs->Window;
for (i = 0; i < (TIGON_I_TX_RING_ENTRIES
* sizeof(struct tx_desc)) / sizeof(u32); i++)
writel(0, (__force void __iomem *)ap->tx_ring + i * 4);
set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE);
} else {
memset(ap->tx_ring, 0,
MAX_TX_RING_ENTRIES * sizeof(struct tx_desc));
set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma);
}
info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap);
tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
/*
* The Tigon I does not like having the TX ring in host memory ;-(
*/
if (!ACE_IS_TIGON_I(ap))
tmp |= RCB_FLG_TX_HOST_RING;
#if TX_COAL_INTS_ONLY
tmp |= RCB_FLG_COAL_INT_ONLY;
#endif
info->tx_ctrl.flags = tmp;
set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma);
/*
* Potential item for tuning parameter
*/
#if 0 /* NO */
writel(DMA_THRESH_16W, &regs->DmaReadCfg);
writel(DMA_THRESH_16W, &regs->DmaWriteCfg);
#else
writel(DMA_THRESH_8W, &regs->DmaReadCfg);
writel(DMA_THRESH_8W, &regs->DmaWriteCfg);
#endif
writel(0, &regs->MaskInt);
writel(1, &regs->IfIdx);
#if 0
/*
* McKinley boxes do not like us fiddling with AssistState
* this early
*/
writel(1, &regs->AssistState);
#endif
writel(DEF_STAT, &regs->TuneStatTicks);
writel(DEF_TRACE, &regs->TuneTrace);
ace_set_rxtx_parms(dev, 0);
if (board_idx == BOARD_IDX_OVERFLOW) {
printk(KERN_WARNING "%s: more than %i NICs detected, "
"ignoring module parameters!\n",
ap->name, ACE_MAX_MOD_PARMS);
} else if (board_idx >= 0) {
if (tx_coal_tick[board_idx])
writel(tx_coal_tick[board_idx],
&regs->TuneTxCoalTicks);
if (max_tx_desc[board_idx])
writel(max_tx_desc[board_idx], &regs->TuneMaxTxDesc);
if (rx_coal_tick[board_idx])
writel(rx_coal_tick[board_idx],
&regs->TuneRxCoalTicks);
if (max_rx_desc[board_idx])
writel(max_rx_desc[board_idx], &regs->TuneMaxRxDesc);
if (trace[board_idx])
writel(trace[board_idx], &regs->TuneTrace);
if ((tx_ratio[board_idx] > 0) && (tx_ratio[board_idx] < 64))
writel(tx_ratio[board_idx], &regs->TxBufRat);
}
/*
* Default link parameters
*/
tmp = LNK_ENABLE | LNK_FULL_DUPLEX | LNK_1000MB | LNK_100MB |
LNK_10MB | LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL | LNK_NEGOTIATE;
if(ap->version >= 2)
tmp |= LNK_TX_FLOW_CTL_Y;
/*
* Override link default parameters
*/
if ((board_idx >= 0) && link_state[board_idx]) {
int option = link_state[board_idx];
tmp = LNK_ENABLE;
if (option & 0x01) {
printk(KERN_INFO "%s: Setting half duplex link\n",
ap->name);
tmp &= ~LNK_FULL_DUPLEX;
}
if (option & 0x02)
tmp &= ~LNK_NEGOTIATE;
if (option & 0x10)
tmp |= LNK_10MB;
if (option & 0x20)
tmp |= LNK_100MB;
if (option & 0x40)
tmp |= LNK_1000MB;
if ((option & 0x70) == 0) {
printk(KERN_WARNING "%s: No media speed specified, "
"forcing auto negotiation\n", ap->name);
tmp |= LNK_NEGOTIATE | LNK_1000MB |
LNK_100MB | LNK_10MB;
}
if ((option & 0x100) == 0)
tmp |= LNK_NEG_FCTL;
else
printk(KERN_INFO "%s: Disabling flow control "
"negotiation\n", ap->name);
if (option & 0x200)
tmp |= LNK_RX_FLOW_CTL_Y;
if ((option & 0x400) && (ap->version >= 2)) {
printk(KERN_INFO "%s: Enabling TX flow control\n",
ap->name);
tmp |= LNK_TX_FLOW_CTL_Y;
}
}
ap->link = tmp;
writel(tmp, &regs->TuneLink);
if (ap->version >= 2)
writel(tmp, &regs->TuneFastLink);
writel(ap->firmware_start, &regs->Pc);
writel(0, &regs->Mb0Lo);
/*
* Set tx_csm before we start receiving interrupts, otherwise
* the interrupt handler might think it is supposed to process
* tx ints before we are up and running, which may cause a null
* pointer access in the int handler.
*/
ap->cur_rx = 0;
ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0;
wmb();
ace_set_txprd(regs, ap, 0);
writel(0, &regs->RxRetCsm);
/*
* Enable DMA engine now.
* If we do this sooner, Mckinley box pukes.
* I assume it's because Tigon II DMA engine wants to check
* *something* even before the CPU is started.
*/
writel(1, &regs->AssistState); /* enable DMA */
/*
* Start the NIC CPU
*/
writel(readl(&regs->CpuCtrl) & ~(CPU_HALT|CPU_TRACE), &regs->CpuCtrl);
readl(&regs->CpuCtrl);
/*
* Wait for the firmware to spin up - max 3 seconds.
*/
myjif = jiffies + 3 * HZ;
while (time_before(jiffies, myjif) && !ap->fw_running)
cpu_relax();
if (!ap->fw_running) {
printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name);
ace_dump_trace(ap);
writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
readl(&regs->CpuCtrl);
/* aman@sgi.com - account for badly behaving firmware/NIC:
* - have observed that the NIC may continue to generate
* interrupts for some reason; attempt to stop it - halt
* second CPU for Tigon II cards, and also clear Mb0
* - if we're a module, we'll fail to load if this was
* the only GbE card in the system => if the kernel does
* see an interrupt from the NIC, code to handle it is
* gone and OOps! - so free_irq also
*/
if (ap->version >= 2)
writel(readl(&regs->CpuBCtrl) | CPU_HALT,
&regs->CpuBCtrl);
writel(0, &regs->Mb0Lo);
readl(&regs->Mb0Lo);
ecode = -EBUSY;
goto init_error;
}
/*
* We load the ring here as there seems to be no way to tell the
* firmware to wipe the ring without re-initializing it.
*/
if (!test_and_set_bit(0, &ap->std_refill_busy))
ace_load_std_rx_ring(dev, RX_RING_SIZE);
else
printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
ap->name);
if (ap->version >= 2) {
if (!test_and_set_bit(0, &ap->mini_refill_busy))
ace_load_mini_rx_ring(dev, RX_MINI_SIZE);
else
printk(KERN_ERR "%s: Someone is busy refilling "
"the RX mini ring\n", ap->name);
}
return 0;
init_error:
ace_init_cleanup(dev);
return ecode;
}
static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
int board_idx = ap->board_idx;
if (board_idx >= 0) {
if (!jumbo) {
if (!tx_coal_tick[board_idx])
writel(DEF_TX_COAL, &regs->TuneTxCoalTicks);
if (!max_tx_desc[board_idx])
writel(DEF_TX_MAX_DESC, &regs->TuneMaxTxDesc);
if (!rx_coal_tick[board_idx])
writel(DEF_RX_COAL, &regs->TuneRxCoalTicks);
if (!max_rx_desc[board_idx])
writel(DEF_RX_MAX_DESC, &regs->TuneMaxRxDesc);
if (!tx_ratio[board_idx])
writel(DEF_TX_RATIO, &regs->TxBufRat);
} else {
if (!tx_coal_tick[board_idx])
writel(DEF_JUMBO_TX_COAL,
&regs->TuneTxCoalTicks);
if (!max_tx_desc[board_idx])
writel(DEF_JUMBO_TX_MAX_DESC,
&regs->TuneMaxTxDesc);
if (!rx_coal_tick[board_idx])
writel(DEF_JUMBO_RX_COAL,
&regs->TuneRxCoalTicks);
if (!max_rx_desc[board_idx])
writel(DEF_JUMBO_RX_MAX_DESC,
&regs->TuneMaxRxDesc);
if (!tx_ratio[board_idx])
writel(DEF_JUMBO_TX_RATIO, &regs->TxBufRat);
}
}
}
static void ace_watchdog(struct net_device *data)
{
struct net_device *dev = data;
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
/*
* We haven't received a stats update event for more than 2.5
* seconds and there is data in the transmit queue, thus we
* assume the card is stuck.
*/
if (*ap->tx_csm != ap->tx_ret_csm) {
printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n",
dev->name, (unsigned int)readl(&regs->HostCtrl));
/* This can happen due to ieee flow control. */
} else {
printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n",
dev->name);
#if 0
netif_wake_queue(dev);
#endif
}
}
static void ace_tasklet(unsigned long arg)
{
struct net_device *dev = (struct net_device *) arg;
struct ace_private *ap = netdev_priv(dev);
int cur_size;
cur_size = atomic_read(&ap->cur_rx_bufs);
if ((cur_size < RX_LOW_STD_THRES) &&
!test_and_set_bit(0, &ap->std_refill_busy)) {
#ifdef DEBUG
printk("refilling buffers (current %i)\n", cur_size);
#endif
ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size);
}
if (ap->version >= 2) {
cur_size = atomic_read(&ap->cur_mini_bufs);
if ((cur_size < RX_LOW_MINI_THRES) &&
!test_and_set_bit(0, &ap->mini_refill_busy)) {
#ifdef DEBUG
printk("refilling mini buffers (current %i)\n",
cur_size);
#endif
ace_load_mini_rx_ring(dev, RX_MINI_SIZE - cur_size);
}
}
cur_size = atomic_read(&ap->cur_jumbo_bufs);
if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) &&
!test_and_set_bit(0, &ap->jumbo_refill_busy)) {
#ifdef DEBUG
printk("refilling jumbo buffers (current %i)\n", cur_size);
#endif
ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
}
ap->tasklet_pending = 0;
}
/*
* Copy the contents of the NIC's trace buffer to kernel memory.
*/
static void ace_dump_trace(struct ace_private *ap)
{
#if 0
if (!ap->trace_buf)
if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL)))
return;
#endif
}
/*
* Load the standard rx ring.
*
* Loading rings is safe without holding the spin lock since this is
* done only before the device is enabled (thus no interrupts are
* generated) and by the interrupt handler/tasklet handler.
*/
static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
short i, idx;
prefetchw(&ap->cur_rx_bufs);
idx = ap->rx_std_skbprd;
for (i = 0; i < nr_bufs; i++) {
struct sk_buff *skb;
struct rx_desc *rd;
dma_addr_t mapping;
skb = netdev_alloc_skb_ip_align(dev, ACE_STD_BUFSIZE);
if (!skb)
break;
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
ACE_STD_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_std_skbuff[idx].skb = skb;
dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
mapping, mapping);
rd = &ap->rx_std_ring[idx];
set_aceaddr(&rd->addr, mapping);
rd->size = ACE_STD_BUFSIZE;
rd->idx = idx;
idx = (idx + 1) % RX_STD_RING_ENTRIES;
}
if (!i)
goto error_out;
atomic_add(i, &ap->cur_rx_bufs);
ap->rx_std_skbprd = idx;
if (ACE_IS_TIGON_I(ap)) {
struct cmd cmd;
cmd.evt = C_SET_RX_PRD_IDX;
cmd.code = 0;
cmd.idx = ap->rx_std_skbprd;
ace_issue_cmd(regs, &cmd);
} else {
writel(idx, &regs->RxStdPrd);
wmb();
}
out:
clear_bit(0, &ap->std_refill_busy);
return;
error_out:
printk(KERN_INFO "Out of memory when allocating "
"standard receive buffers\n");
goto out;
}
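/*
 * Summary of the refill pattern above (shared by the mini and jumbo
 * variants): allocate skbs, map them for DMA, fill descriptors, then
 * publish the new circular producer index, e.g. (70 + 2) %
 * RX_STD_RING_ENTRIES. On the Tigon I the index is posted through
 * the command ring; on the Tigon II it is written straight to the
 * producer register, followed by wmb().
 */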
static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
short i, idx;
prefetchw(&ap->cur_mini_bufs);
idx = ap->rx_mini_skbprd;
for (i = 0; i < nr_bufs; i++) {
struct sk_buff *skb;
struct rx_desc *rd;
dma_addr_t mapping;
skb = netdev_alloc_skb_ip_align(dev, ACE_MINI_BUFSIZE);
if (!skb)
break;
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
ACE_MINI_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_mini_skbuff[idx].skb = skb;
dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
mapping, mapping);
rd = &ap->rx_mini_ring[idx];
set_aceaddr(&rd->addr, mapping);
rd->size = ACE_MINI_BUFSIZE;
rd->idx = idx;
idx = (idx + 1) % RX_MINI_RING_ENTRIES;
}
if (!i)
goto error_out;
atomic_add(i, &ap->cur_mini_bufs);
ap->rx_mini_skbprd = idx;
writel(idx, &regs->RxMiniPrd);
wmb();
out:
clear_bit(0, &ap->mini_refill_busy);
return;
error_out:
printk(KERN_INFO "Out of memory when allocating "
"mini receive buffers\n");
goto out;
}
/*
* Load the jumbo rx ring, this may happen at any time if the MTU
* is changed to a value > 1500.
*/
static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
short i, idx;
idx = ap->rx_jumbo_skbprd;
for (i = 0; i < nr_bufs; i++) {
struct sk_buff *skb;
struct rx_desc *rd;
dma_addr_t mapping;
skb = netdev_alloc_skb_ip_align(dev, ACE_JUMBO_BUFSIZE);
if (!skb)
break;
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
ACE_JUMBO_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_jumbo_skbuff[idx].skb = skb;
dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
mapping, mapping);
rd = &ap->rx_jumbo_ring[idx];
set_aceaddr(&rd->addr, mapping);
rd->size = ACE_JUMBO_BUFSIZE;
rd->idx = idx;
idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
}
if (!i)
goto error_out;
atomic_add(i, &ap->cur_jumbo_bufs);
ap->rx_jumbo_skbprd = idx;
if (ACE_IS_TIGON_I(ap)) {
struct cmd cmd;
cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
cmd.code = 0;
cmd.idx = ap->rx_jumbo_skbprd;
ace_issue_cmd(regs, &cmd);
} else {
writel(idx, &regs->RxJumboPrd);
wmb();
}
out:
clear_bit(0, &ap->jumbo_refill_busy);
return;
error_out:
if (net_ratelimit())
printk(KERN_INFO "Out of memory when allocating "
"jumbo receive buffers\n");
goto out;
}
/*
* All events are considered to be slow (RX/TX ints do not generate
* events) and are handled here, outside the main interrupt handler,
* to reduce the size of the handler.
*/
static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
{
struct ace_private *ap;
ap = netdev_priv(dev);
while (evtcsm != evtprd) {
switch (ap->evt_ring[evtcsm].evt) {
case E_FW_RUNNING:
printk(KERN_INFO "%s: Firmware up and running\n",
ap->name);
ap->fw_running = 1;
wmb();
break;
case E_STATS_UPDATED:
break;
case E_LNK_STATE:
{
u16 code = ap->evt_ring[evtcsm].code;
switch (code) {
case E_C_LINK_UP:
{
u32 state = readl(&ap->regs->GigLnkState);
printk(KERN_WARNING "%s: Optical link UP "
"(%s Duplex, Flow Control: %s%s)\n",
ap->name,
state & LNK_FULL_DUPLEX ? "Full":"Half",
state & LNK_TX_FLOW_CTL_Y ? "TX " : "",
state & LNK_RX_FLOW_CTL_Y ? "RX" : "");
break;
}
case E_C_LINK_DOWN:
printk(KERN_WARNING "%s: Optical link DOWN\n",
ap->name);
break;
case E_C_LINK_10_100:
printk(KERN_WARNING "%s: 10/100BaseT link "
"UP\n", ap->name);
break;
default:
printk(KERN_ERR "%s: Unknown optical link "
"state %02x\n", ap->name, code);
}
break;
}
case E_ERROR:
switch(ap->evt_ring[evtcsm].code) {
case E_C_ERR_INVAL_CMD:
printk(KERN_ERR "%s: invalid command error\n",
ap->name);
break;
case E_C_ERR_UNIMP_CMD:
printk(KERN_ERR "%s: unimplemented command "
"error\n", ap->name);
break;
case E_C_ERR_BAD_CFG:
printk(KERN_ERR "%s: bad config error\n",
ap->name);
break;
default:
printk(KERN_ERR "%s: unknown error %02x\n",
ap->name, ap->evt_ring[evtcsm].code);
}
break;
case E_RESET_JUMBO_RNG:
{
int i;
for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
if (ap->skb->rx_jumbo_skbuff[i].skb) {
ap->rx_jumbo_ring[i].size = 0;
set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0);
dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
ap->skb->rx_jumbo_skbuff[i].skb = NULL;
}
}
if (ACE_IS_TIGON_I(ap)) {
struct cmd cmd;
cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
cmd.code = 0;
cmd.idx = 0;
ace_issue_cmd(ap->regs, &cmd);
} else {
writel(0, &((ap->regs)->RxJumboPrd));
wmb();
}
ap->jumbo = 0;
ap->rx_jumbo_skbprd = 0;
printk(KERN_INFO "%s: Jumbo ring flushed\n",
ap->name);
clear_bit(0, &ap->jumbo_refill_busy);
break;
}
default:
printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
ap->name, ap->evt_ring[evtcsm].evt);
}
evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
}
return evtcsm;
}
static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
{
struct ace_private *ap = netdev_priv(dev);
u32 idx;
int mini_count = 0, std_count = 0;
idx = rxretcsm;
prefetchw(&ap->cur_rx_bufs);
prefetchw(&ap->cur_mini_bufs);
while (idx != rxretprd) {
struct ring_info *rip;
struct sk_buff *skb;
struct rx_desc *rxdesc, *retdesc;
u32 skbidx;
int bd_flags, desc_type, mapsize;
u16 csum;
/* make sure the rx descriptor isn't read before rxretprd */
if (idx == rxretcsm)
rmb();
retdesc = &ap->rx_return_ring[idx];
skbidx = retdesc->idx;
bd_flags = retdesc->flags;
desc_type = bd_flags & (BD_FLG_JUMBO | BD_FLG_MINI);
switch (desc_type) {
/*
* Normal frames do not have any flags set
*
* Mini and normal frames arrive frequently,
* so use a local counter to avoid doing
* atomic operations for each packet arriving.
*/
case 0:
rip = &ap->skb->rx_std_skbuff[skbidx];
mapsize = ACE_STD_BUFSIZE;
rxdesc = &ap->rx_std_ring[skbidx];
std_count++;
break;
case BD_FLG_JUMBO:
rip = &ap->skb->rx_jumbo_skbuff[skbidx];
mapsize = ACE_JUMBO_BUFSIZE;
rxdesc = &ap->rx_jumbo_ring[skbidx];
atomic_dec(&ap->cur_jumbo_bufs);
break;
case BD_FLG_MINI:
rip = &ap->skb->rx_mini_skbuff[skbidx];
mapsize = ACE_MINI_BUFSIZE;
rxdesc = &ap->rx_mini_ring[skbidx];
mini_count++;
break;
default:
printk(KERN_INFO "%s: unknown frame type (0x%02x) "
"returned by NIC\n", dev->name,
retdesc->flags);
goto error;
}
skb = rip->skb;
rip->skb = NULL;
pci_unmap_page(ap->pdev,
dma_unmap_addr(rip, mapping),
mapsize,
PCI_DMA_FROMDEVICE);
skb_put(skb, retdesc->size);
/*
* Fly baby, fly!
*/
csum = retdesc->tcp_udp_csum;
skb->protocol = eth_type_trans(skb, dev);
/*
* Instead of forcing the poor tigon mips cpu to calculate
* pseudo hdr checksum, we do this ourselves.
*/
if (bd_flags & BD_FLG_TCP_UDP_SUM) {
skb->csum = htons(csum);
skb->ip_summed = CHECKSUM_COMPLETE;
} else {
skb_checksum_none_assert(skb);
}
/* send it up */
if ((bd_flags & BD_FLG_VLAN_TAG))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), retdesc->vlan);
netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += retdesc->size;
idx = (idx + 1) % RX_RETURN_RING_ENTRIES;
}
atomic_sub(std_count, &ap->cur_rx_bufs);
if (!ACE_IS_TIGON_I(ap))
atomic_sub(mini_count, &ap->cur_mini_bufs);
out:
/*
* According to the documentation RxRetCsm is obsolete with
* the 12.3.x Firmware - my Tigon I NICs seem to disagree!
*/
if (ACE_IS_TIGON_I(ap)) {
writel(idx, &ap->regs->RxRetCsm);
}
ap->cur_rx = idx;
return;
error:
idx = rxretprd;
goto out;
}
static inline void ace_tx_int(struct net_device *dev,
u32 txcsm, u32 idx)
{
struct ace_private *ap = netdev_priv(dev);
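/* reclaim completed TX descriptors from idx up to, but not including, txcsm */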
do {
struct sk_buff *skb;
struct tx_ring_info *info;
info = ap->skb->tx_skbuff + idx;
skb = info->skb;
if (dma_unmap_len(info, maplen)) {
pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
dma_unmap_len(info, maplen),
PCI_DMA_TODEVICE);
dma_unmap_len_set(info, maplen, 0);
}
if (skb) {
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
dev_kfree_skb_irq(skb);
info->skb = NULL;
}
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
} while (idx != txcsm);
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
wmb();
ap->tx_ret_csm = txcsm;
/* So... tx_ret_csm is advanced _after_ check for device wakeup.
*
* We could try to make it before. In this case we would get
* the following race condition: hard_start_xmit on other cpu
* enters after we advanced tx_ret_csm and fills space,
* which we have just freed, so that we make illegal device wakeup.
* There is no good way to work around this (the check at entry
* to ace_start_xmit detects this condition and prevents
* ring corruption, but it is not a good workaround.)
*
* When tx_ret_csm is advanced after, we wake up device _only_
* if we really have some space in ring (though the core doing
* hard_start_xmit can see full ring for some period and has to
* synchronize.) Superb.
* BUT! We get another subtle race condition. hard_start_xmit
* may think that ring is full between wakeup and advancing
* tx_ret_csm and will stop device instantly! It is not so bad.
* We are guaranteed that there is something in ring, so that
* the next irq will resume transmission. To speedup this we could
* mark descriptor, which closes ring with BD_FLG_COAL_NOW
* (see ace_start_xmit).
*
* Well, this dilemma exists in all lock-free devices.
* We, following scheme used in drivers by Donald Becker,
* select the least dangerous.
* --ANK
*/
}
static irqreturn_t ace_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
u32 idx;
u32 txcsm, rxretcsm, rxretprd;
u32 evtcsm, evtprd;
/*
* In case of PCI shared interrupts or spurious interrupts,
* we want to make sure it is actually our interrupt before
* spending any time in here.
*/
if (!(readl(&regs->HostCtrl) & IN_INT))
return IRQ_NONE;
/*
* ACK intr now. Otherwise we will lose updates to rx_ret_prd,
* which happened _after_ rxretprd = *ap->rx_ret_prd; but before
* writel(0, &regs->Mb0Lo).
*
* "IRQ avoidance" recommended in docs applies to IRQs served
* by threads and it is wrong even for that case.
*/
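/* the readl() below flushes the posted PCI write so the ACK reaches the NIC at once */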
writel(0, &regs->Mb0Lo);
readl(&regs->Mb0Lo);
/*
* There is no conflict between transmit handling in
* start_xmit and receive processing, thus there is no reason
* to take a spin lock for RX handling. Wait until we start
* working on the other stuff - hey we don't need a spin lock
* anymore.
*/
rxretprd = *ap->rx_ret_prd;
rxretcsm = ap->cur_rx;
if (rxretprd != rxretcsm)
ace_rx_int(dev, rxretprd, rxretcsm);
txcsm = *ap->tx_csm;
idx = ap->tx_ret_csm;
if (txcsm != idx) {
/*
* If each skb takes only one descriptor this check degenerates
* to identity, because new space has just been opened.
* But if skbs are fragmented we must check that this index
* update releases enough space, otherwise we just
* wait for the device to complete more work.
*/
if (!tx_ring_full(ap, txcsm, ap->tx_prd))
ace_tx_int(dev, txcsm, idx);
}
evtcsm = readl(&regs->EvtCsm);
evtprd = *ap->evt_prd;
if (evtcsm != evtprd) {
evtcsm = ace_handle_event(dev, evtcsm, evtprd);
writel(evtcsm, &regs->EvtCsm);
}
/*
* This has to go last in the interrupt handler and run with
* the spin lock released ... what lock?
*/
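/* Refill any RX ring that has drained below its low-water mark; if a refill is already in flight, defer the work to the tasklet. */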
if (netif_running(dev)) {
int cur_size;
int run_tasklet = 0;
cur_size = atomic_read(&ap->cur_rx_bufs);
if (cur_size < RX_LOW_STD_THRES) {
if ((cur_size < RX_PANIC_STD_THRES) &&
!test_and_set_bit(0, &ap->std_refill_busy)) {
#ifdef DEBUG
printk("low on std buffers %i\n", cur_size);
#endif
ace_load_std_rx_ring(dev,
RX_RING_SIZE - cur_size);
} else
run_tasklet = 1;
}
if (!ACE_IS_TIGON_I(ap)) {
cur_size = atomic_read(&ap->cur_mini_bufs);
if (cur_size < RX_LOW_MINI_THRES) {
if ((cur_size < RX_PANIC_MINI_THRES) &&
!test_and_set_bit(0,
&ap->mini_refill_busy)) {
#ifdef DEBUG
printk("low on mini buffers %i\n",
cur_size);
#endif
ace_load_mini_rx_ring(dev,
RX_MINI_SIZE - cur_size);
} else
run_tasklet = 1;
}
}
if (ap->jumbo) {
cur_size = atomic_read(&ap->cur_jumbo_bufs);
if (cur_size < RX_LOW_JUMBO_THRES) {
if ((cur_size < RX_PANIC_JUMBO_THRES) &&
!test_and_set_bit(0,
&ap->jumbo_refill_busy)){
#ifdef DEBUG
printk("low on jumbo buffers %i\n",
cur_size);
#endif
ace_load_jumbo_rx_ring(dev,
RX_JUMBO_SIZE - cur_size);
} else
run_tasklet = 1;
}
}
if (run_tasklet && !ap->tasklet_pending) {
ap->tasklet_pending = 1;
tasklet_schedule(&ap->ace_tasklet);
}
}
return IRQ_HANDLED;
}
static int ace_open(struct net_device *dev)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
struct cmd cmd;
if (!(ap->fw_running)) {
printk(KERN_WARNING "%s: Firmware not running!\n", dev->name);
return -EBUSY;
}
writel(dev->mtu + ETH_HLEN + 4, &regs->IfMtu);
cmd.evt = C_CLEAR_STATS;
cmd.code = 0;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
cmd.evt = C_HOST_STATE;
cmd.code = C_C_STACK_UP;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
if (ap->jumbo &&
!test_and_set_bit(0, &ap->jumbo_refill_busy))
ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
if (dev->flags & IFF_PROMISC) {
cmd.evt = C_SET_PROMISC_MODE;
cmd.code = C_C_PROMISC_ENABLE;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
ap->promisc = 1;
} else
ap->promisc = 0;
ap->mcast_all = 0;
#if 0
cmd.evt = C_LNK_NEGOTIATION;
cmd.code = 0;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
#endif
netif_start_queue(dev);
/*
* Setup the bottom half rx ring refill handler
*/
tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev);
return 0;
}
static int ace_close(struct net_device *dev)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
struct cmd cmd;
unsigned long flags;
short i;
/*
* Without (or before) releasing irq and stopping hardware, this
* is an absolute non-sense, by the way. It will be reset instantly
* by the first irq.
*/
netif_stop_queue(dev);
if (ap->promisc) {
cmd.evt = C_SET_PROMISC_MODE;
cmd.code = C_C_PROMISC_DISABLE;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
ap->promisc = 0;
}
cmd.evt = C_HOST_STATE;
cmd.code = C_C_STACK_DOWN;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
tasklet_kill(&ap->ace_tasklet);
/*
* Make sure one CPU is not processing packets while
* buffers are being released by another.
*/
local_irq_save(flags);
ace_mask_irq(dev);
for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
struct sk_buff *skb;
struct tx_ring_info *info;
info = ap->skb->tx_skbuff + i;
skb = info->skb;
if (dma_unmap_len(info, maplen)) {
if (ACE_IS_TIGON_I(ap)) {
/* NB: TIGON_1 is special, tx_ring is in io space */
struct tx_desc __iomem *tx;
tx = (__force struct tx_desc __iomem *) &ap->tx_ring[i];
writel(0, &tx->addr.addrhi);
writel(0, &tx->addr.addrlo);
writel(0, &tx->flagsize);
} else
memset(ap->tx_ring + i, 0,
sizeof(struct tx_desc));
pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
dma_unmap_len(info, maplen),
PCI_DMA_TODEVICE);
dma_unmap_len_set(info, maplen, 0);
}
if (skb) {
dev_kfree_skb(skb);
info->skb = NULL;
}
}
if (ap->jumbo) {
cmd.evt = C_RESET_JUMBO_RNG;
cmd.code = 0;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
}
ace_unmask_irq(dev);
local_irq_restore(flags);
return 0;
}
static inline dma_addr_t
ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
struct sk_buff *tail, u32 idx)
{
dma_addr_t mapping;
struct tx_ring_info *info;
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
skb->len, PCI_DMA_TODEVICE);
info = ap->skb->tx_skbuff + idx;
info->skb = tail;
dma_unmap_addr_set(info, mapping, mapping);
dma_unmap_len_set(info, maplen, skb->len);
return mapping;
}
static inline void
ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr,
u32 flagsize, u32 vlan_tag)
{
#if !USE_TX_COAL_NOW
flagsize &= ~BD_FLG_COAL_NOW;
#endif
if (ACE_IS_TIGON_I(ap)) {
struct tx_desc __iomem *io = (__force struct tx_desc __iomem *) desc;
writel(addr >> 32, &io->addr.addrhi);
writel(addr & 0xffffffff, &io->addr.addrlo);
writel(flagsize, &io->flagsize);
writel(vlan_tag, &io->vlanres);
} else {
desc->addr.addrhi = addr >> 32;
desc->addr.addrlo = addr;
desc->flagsize = flagsize;
desc->vlanres = vlan_tag;
}
}
static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
struct tx_desc *desc;
u32 idx, flagsize;
unsigned long maxjiff = jiffies + 3*HZ;
restart:
idx = ap->tx_prd;
if (tx_ring_full(ap, ap->tx_ret_csm, idx))
goto overflow;
if (!skb_shinfo(skb)->nr_frags) {
dma_addr_t mapping;
u32 vlan_tag = 0;
mapping = ace_map_tx_skb(ap, skb, skb, idx);
flagsize = (skb->len << 16) | (BD_FLG_END);
if (skb->ip_summed == CHECKSUM_PARTIAL)
flagsize |= BD_FLG_TCP_UDP_SUM;
if (skb_vlan_tag_present(skb)) {
flagsize |= BD_FLG_VLAN_TAG;
vlan_tag = skb_vlan_tag_get(skb);
}
desc = ap->tx_ring + idx;
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
/* Look at ace_tx_int for explanations. */
if (tx_ring_full(ap, ap->tx_ret_csm, idx))
flagsize |= BD_FLG_COAL_NOW;
ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
} else {
dma_addr_t mapping;
u32 vlan_tag = 0;
int i, len = 0;
mapping = ace_map_tx_skb(ap, skb, NULL, idx);
flagsize = (skb_headlen(skb) << 16);
if (skb->ip_summed == CHECKSUM_PARTIAL)
flagsize |= BD_FLG_TCP_UDP_SUM;
if (skb_vlan_tag_present(skb)) {
flagsize |= BD_FLG_VLAN_TAG;
vlan_tag = skb_vlan_tag_get(skb);
}
ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
struct tx_ring_info *info;
len += skb_frag_size(frag);
info = ap->skb->tx_skbuff + idx;
desc = ap->tx_ring + idx;
mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0,
skb_frag_size(frag),
DMA_TO_DEVICE);
flagsize = skb_frag_size(frag) << 16;
if (skb->ip_summed == CHECKSUM_PARTIAL)
flagsize |= BD_FLG_TCP_UDP_SUM;
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
if (i == skb_shinfo(skb)->nr_frags - 1) {
flagsize |= BD_FLG_END;
if (tx_ring_full(ap, ap->tx_ret_csm, idx))
flagsize |= BD_FLG_COAL_NOW;
/*
* Only the last fragment frees
* the skb!
*/
info->skb = skb;
} else {
info->skb = NULL;
}
dma_unmap_addr_set(info, mapping, mapping);
dma_unmap_len_set(info, maplen, skb_frag_size(frag));
ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
}
}
wmb();
ap->tx_prd = idx;
ace_set_txprd(regs, ap, idx);
if (flagsize & BD_FLG_COAL_NOW) {
netif_stop_queue(dev);
/*
* A TX-descriptor producer (an IRQ) might have gotten
* between, making the ring free again. Since xmit is
* serialized, this is the only situation we have to
* re-test.
*/
if (!tx_ring_full(ap, ap->tx_ret_csm, idx))
netif_wake_queue(dev);
}
return NETDEV_TX_OK;
overflow:
/*
* This race condition is unavoidable with lock-free drivers.
* We wake up the queue _before_ tx_prd is advanced, so that we can
* enter hard_start_xmit too early, while tx ring still looks closed.
* This happens ~1-4 times per 100000 packets, rarely enough that
* we can afford to busy-loop here, syncing with the other CPU.
* Probably, we need an additional wmb() in ace_tx_intr as well.
*
* Note that this race is relieved by reserving one more entry
* in tx ring than it is necessary (see original non-SG driver).
* However, with SG we need to reserve 2*MAX_SKB_FRAGS+1, which
* is already overkill.
*
* An alternative is to return NETDEV_TX_BUSY without throttling the
* queue. In this case the loop only becomes longer, with no added benefit.
*/
if (time_before(jiffies, maxjiff)) {
barrier();
cpu_relax();
goto restart;
}
/* The ring is stuck full. */
printk(KERN_WARNING "%s: Transmit ring stuck full\n", dev->name);
return NETDEV_TX_BUSY;
}
static int ace_change_mtu(struct net_device *dev, int new_mtu)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
if (new_mtu > ACE_JUMBO_MTU)
return -EINVAL;
writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
dev->mtu = new_mtu;
if (new_mtu > ACE_STD_MTU) {
if (!(ap->jumbo)) {
printk(KERN_INFO "%s: Enabling Jumbo frame "
"support\n", dev->name);
ap->jumbo = 1;
if (!test_and_set_bit(0, &ap->jumbo_refill_busy))
ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
ace_set_rxtx_parms(dev, 1);
}
} else {
while (test_and_set_bit(0, &ap->jumbo_refill_busy));
ace_sync_irq(dev->irq);
ace_set_rxtx_parms(dev, 0);
if (ap->jumbo) {
struct cmd cmd;
cmd.evt = C_RESET_JUMBO_RNG;
cmd.code = 0;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
}
}
return 0;
}
static int ace_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
u32 link;
memset(ecmd, 0, sizeof(struct ethtool_cmd));
ecmd->supported =
(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg | SUPPORTED_FIBRE);
ecmd->port = PORT_FIBRE;
ecmd->transceiver = XCVR_INTERNAL;
link = readl(&regs->GigLnkState);
if (link & LNK_1000MB)
ethtool_cmd_speed_set(ecmd, SPEED_1000);
else {
link = readl(&regs->FastLnkState);
if (link & LNK_100MB)
ethtool_cmd_speed_set(ecmd, SPEED_100);
else if (link & LNK_10MB)
ethtool_cmd_speed_set(ecmd, SPEED_10);
else
ethtool_cmd_speed_set(ecmd, 0);
}
if (link & LNK_FULL_DUPLEX)
ecmd->duplex = DUPLEX_FULL;
else
ecmd->duplex = DUPLEX_HALF;
if (link & LNK_NEGOTIATE)
ecmd->autoneg = AUTONEG_ENABLE;
else
ecmd->autoneg = AUTONEG_DISABLE;
#if 0
/*
* Current struct ethtool_cmd is insufficient
*/
ecmd->trace = readl(&regs->TuneTrace);
ecmd->txcoal = readl(&regs->TuneTxCoalTicks);
ecmd->rxcoal = readl(&regs->TuneRxCoalTicks);
#endif
ecmd->maxtxpkt = readl(&regs->TuneMaxTxDesc);
ecmd->maxrxpkt = readl(&regs->TuneMaxRxDesc);
return 0;
}
static int ace_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
u32 link, speed;
link = readl(&regs->GigLnkState);
if (link & LNK_1000MB)
speed = SPEED_1000;
else {
link = readl(&regs->FastLnkState);
if (link & LNK_100MB)
speed = SPEED_100;
else if (link & LNK_10MB)
speed = SPEED_10;
else
speed = SPEED_100;
}
link = LNK_ENABLE | LNK_1000MB | LNK_100MB | LNK_10MB |
LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL;
if (!ACE_IS_TIGON_I(ap))
link |= LNK_TX_FLOW_CTL_Y;
if (ecmd->autoneg == AUTONEG_ENABLE)
link |= LNK_NEGOTIATE;
if (ethtool_cmd_speed(ecmd) != speed) {
link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB);
switch (ethtool_cmd_speed(ecmd)) {
case SPEED_1000:
link |= LNK_1000MB;
break;
case SPEED_100:
link |= LNK_100MB;
break;
case SPEED_10:
link |= LNK_10MB;
break;
}
}
if (ecmd->duplex == DUPLEX_FULL)
link |= LNK_FULL_DUPLEX;
if (link != ap->link) {
struct cmd cmd;
printk(KERN_INFO "%s: Renegotiating link state\n",
dev->name);
ap->link = link;
writel(link, &regs->TuneLink);
if (!ACE_IS_TIGON_I(ap))
writel(link, &regs->TuneFastLink);
wmb();
cmd.evt = C_LNK_NEGOTIATION;
cmd.code = 0;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
}
return 0;
}
static void ace_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct ace_private *ap = netdev_priv(dev);
strlcpy(info->driver, "acenic", sizeof(info->driver));
snprintf(info->version, sizeof(info->version), "%i.%i.%i",
ap->firmware_major, ap->firmware_minor,
ap->firmware_fix);
if (ap->pdev)
strlcpy(info->bus_info, pci_name(ap->pdev),
sizeof(info->bus_info));
}
/*
* Set the hardware MAC address.
*/
static int ace_set_mac_addr(struct net_device *dev, void *p)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
struct sockaddr *addr = p;
u8 *da;
struct cmd cmd;
if (netif_running(dev))
return -EBUSY;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
da = (u8 *)dev->dev_addr;
writel(da[0] << 8 | da[1], &regs->MacAddrHi);
writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5],
&regs->MacAddrLo);
cmd.evt = C_SET_MAC_ADDR;
cmd.code = 0;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
return 0;
}
static void ace_set_multicast_list(struct net_device *dev)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
struct cmd cmd;
if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) {
cmd.evt = C_SET_MULTICAST_MODE;
cmd.code = C_C_MCAST_ENABLE;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
ap->mcast_all = 1;
} else if (ap->mcast_all) {
cmd.evt = C_SET_MULTICAST_MODE;
cmd.code = C_C_MCAST_DISABLE;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
ap->mcast_all = 0;
}
if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) {
cmd.evt = C_SET_PROMISC_MODE;
cmd.code = C_C_PROMISC_ENABLE;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
ap->promisc = 1;
} else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) {
cmd.evt = C_SET_PROMISC_MODE;
cmd.code = C_C_PROMISC_DISABLE;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
ap->promisc = 0;
}
/*
* For the time being multicast relies on the upper layers
* filtering it properly. The Firmware does not allow one to
* set the entire multicast list at a time and keeping track of
* it here is going to be messy.
*/
if (!netdev_mc_empty(dev) && !ap->mcast_all) {
cmd.evt = C_SET_MULTICAST_MODE;
cmd.code = C_C_MCAST_ENABLE;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
} else if (!ap->mcast_all) {
cmd.evt = C_SET_MULTICAST_MODE;
cmd.code = C_C_MCAST_DISABLE;
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
}
}
static struct net_device_stats *ace_get_stats(struct net_device *dev)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_mac_stats __iomem *mac_stats =
(struct ace_mac_stats __iomem *)ap->regs->Stats;
dev->stats.rx_missed_errors = readl(&mac_stats->drop_space);
dev->stats.multicast = readl(&mac_stats->kept_mc);
dev->stats.collisions = readl(&mac_stats->coll);
return &dev->stats;
}
static void ace_copy(struct ace_regs __iomem *regs, const __be32 *src,
u32 dest, int size)
{
void __iomem *tdest;
short tsize, i;
if (size <= 0)
return;
while (size > 0) {
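/* tsize = bytes left in the current SRAM window, capped by the remaining size and the window size */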
tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
min_t(u32, size, ACE_WINDOW_SIZE));
tdest = (void __iomem *) &regs->Window +
(dest & (ACE_WINDOW_SIZE - 1));
writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
for (i = 0; i < (tsize / 4); i++) {
/* Firmware is big-endian */
writel(be32_to_cpup(src), tdest);
src++;
tdest += 4;
dest += 4;
size -= 4;
}
}
}
static void ace_clear(struct ace_regs __iomem *regs, u32 dest, int size)
{
void __iomem *tdest;
short tsize = 0, i;
if (size <= 0)
return;
while (size > 0) {
tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
min_t(u32, size, ACE_WINDOW_SIZE));
tdest = (void __iomem *) &regs->Window +
(dest & (ACE_WINDOW_SIZE - 1));
writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
for (i = 0; i < (tsize / 4); i++) {
writel(0, tdest + i*4);
}
dest += tsize;
size -= tsize;
}
}
/*
* Download the firmware into the SRAM on the NIC
*
* This operation requires the NIC to be halted and is performed with
* interrupts disabled and with the spinlock hold.
*/
static int ace_load_firmware(struct net_device *dev)
{
const struct firmware *fw;
const char *fw_name = "acenic/tg2.bin";
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
const __be32 *fw_data;
u32 load_addr;
int ret;
if (!(readl(&regs->CpuCtrl) & CPU_HALTED)) {
printk(KERN_ERR "%s: trying to download firmware while the "
"CPU is running!\n", ap->name);
return -EFAULT;
}
if (ACE_IS_TIGON_I(ap))
fw_name = "acenic/tg1.bin";
ret = request_firmware(&fw, fw_name, &ap->pdev->dev);
if (ret) {
printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
ap->name, fw_name);
return ret;
}
fw_data = (void *)fw->data;
/* Firmware blob starts with version numbers, followed by
load and start address. Remainder is the blob to be loaded
contiguously from load address. We don't bother to represent
the BSS/SBSS sections any more, since we were clearing the
whole thing anyway. */
ap->firmware_major = fw->data[0];
ap->firmware_minor = fw->data[1];
ap->firmware_fix = fw->data[2];
ap->firmware_start = be32_to_cpu(fw_data[1]);
if (ap->firmware_start < 0x4000 || ap->firmware_start >= 0x80000) {
printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
ap->name, ap->firmware_start, fw_name);
ret = -EINVAL;
goto out;
}
load_addr = be32_to_cpu(fw_data[2]);
if (load_addr < 0x4000 || load_addr >= 0x80000) {
printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
ap->name, load_addr, fw_name);
ret = -EINVAL;
goto out;
}
/*
* Do not try to clear more than 512KiB or we end up seeing
* funny things on NICs with only 512KiB SRAM
*/
ace_clear(regs, 0x2000, 0x80000-0x2000);
ace_copy(regs, &fw_data[3], load_addr, fw->size-12);
out:
release_firmware(fw);
return ret;
}
/*
* The eeprom on the AceNIC is an Atmel i2c EEPROM.
*
* Accessing the EEPROM is `interesting' to say the least - don't read
* this code right after dinner.
*
* This is all about black magic and bit-banging the device .... I
* wonder in what hospital they have put the guy who designed the i2c
* specs.
*
* Oh yes, this is only the beginning!
*
* Thanks to Stevarino Webinski for helping track down the bugs in the
* i2c readout code by beta testing all my hacks.
*/
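/* Generate an i2c START condition: pull SDA low while SCL is high. */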
static void eeprom_start(struct ace_regs __iomem *regs)
{
u32 local;
readl(&regs->LocalCtrl);
udelay(ACE_SHORT_DELAY);
local = readl(&regs->LocalCtrl);
local |= EEPROM_DATA_OUT | EEPROM_WRITE_ENABLE;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
local |= EEPROM_CLK_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
local &= ~EEPROM_DATA_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
local &= ~EEPROM_CLK_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
}
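/* Clock out one byte, MSB first; the EEPROM samples SDA while SCL is high. */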
static void eeprom_prep(struct ace_regs __iomem *regs, u8 magic)
{
short i;
u32 local;
udelay(ACE_SHORT_DELAY);
local = readl(&regs->LocalCtrl);
local &= ~EEPROM_DATA_OUT;
local |= EEPROM_WRITE_ENABLE;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
for (i = 0; i < 8; i++, magic <<= 1) {
udelay(ACE_SHORT_DELAY);
if (magic & 0x80)
local |= EEPROM_DATA_OUT;
else
local &= ~EEPROM_DATA_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
local |= EEPROM_CLK_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
local &= ~(EEPROM_CLK_OUT | EEPROM_DATA_OUT);
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
}
}
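/* Read the ACK bit: after each byte the EEPROM pulls SDA low on the ninth clock. Returns non-zero if the device failed to ACK. */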
static int eeprom_check_ack(struct ace_regs __iomem *regs)
{
int state;
u32 local;
local = readl(&regs->LocalCtrl);
local &= ~EEPROM_WRITE_ENABLE;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_LONG_DELAY);
local |= EEPROM_CLK_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
/* sample data in middle of high clk */
state = (readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0;
udelay(ACE_SHORT_DELAY);
mb();
writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
return state;
}
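/* Generate an i2c STOP condition: release SDA (low to high) while SCL is high. */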
static void eeprom_stop(struct ace_regs __iomem *regs)
{
u32 local;
udelay(ACE_SHORT_DELAY);
local = readl(&regs->LocalCtrl);
local |= EEPROM_WRITE_ENABLE;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
local &= ~EEPROM_DATA_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
local |= EEPROM_CLK_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
local |= EEPROM_DATA_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_LONG_DELAY);
local &= ~EEPROM_CLK_OUT;
writel(local, &regs->LocalCtrl);
mb();
}
/*
* Read a whole byte from the EEPROM.
*/
static int read_eeprom_byte(struct net_device *dev, unsigned long offset)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
unsigned long flags;
u32 local;
int result = 0;
short i;
/*
* Don't take interrupts on this CPU while bit-banging
* the %#%#@$ I2C device
*/
local_irq_save(flags);
eeprom_start(regs);
eeprom_prep(regs, EEPROM_WRITE_SELECT);
if (eeprom_check_ack(regs)) {
local_irq_restore(flags);
printk(KERN_ERR "%s: Unable to sync eeprom\n", ap->name);
result = -EIO;
goto eeprom_read_error;
}
eeprom_prep(regs, (offset >> 8) & 0xff);
if (eeprom_check_ack(regs)) {
local_irq_restore(flags);
printk(KERN_ERR "%s: Unable to set address byte 0\n",
ap->name);
result = -EIO;
goto eeprom_read_error;
}
eeprom_prep(regs, offset & 0xff);
if (eeprom_check_ack(regs)) {
local_irq_restore(flags);
printk(KERN_ERR "%s: Unable to set address byte 1\n",
ap->name);
result = -EIO;
goto eeprom_read_error;
}
eeprom_start(regs);
eeprom_prep(regs, EEPROM_READ_SELECT);
if (eeprom_check_ack(regs)) {
local_irq_restore(flags);
printk(KERN_ERR "%s: Unable to set READ_SELECT\n",
ap->name);
result = -EIO;
goto eeprom_read_error;
}
for (i = 0; i < 8; i++) {
local = readl(&regs->LocalCtrl);
local &= ~EEPROM_WRITE_ENABLE;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
udelay(ACE_LONG_DELAY);
mb();
local |= EEPROM_CLK_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
/* sample data mid high clk */
result = (result << 1) |
((readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0);
udelay(ACE_SHORT_DELAY);
mb();
local = readl(&regs->LocalCtrl);
local &= ~EEPROM_CLK_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
udelay(ACE_SHORT_DELAY);
mb();
if (i == 7) {
local |= EEPROM_WRITE_ENABLE;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
}
}
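/* all eight data bits are in; answer with a NAK (SDA high for one clock) and a STOP to end the read */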
local |= EEPROM_DATA_OUT;
writel(local, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
writel(readl(&regs->LocalCtrl) | EEPROM_CLK_OUT, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
udelay(ACE_LONG_DELAY);
writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
readl(&regs->LocalCtrl);
mb();
udelay(ACE_SHORT_DELAY);
eeprom_stop(regs);
local_irq_restore(flags);
out:
return result;
eeprom_read_error:
printk(KERN_ERR "%s: Unable to read eeprom byte 0x%02lx\n",
ap->name, offset);
goto out;
}
module_pci_driver(acenic_pci_driver);
| gpl-2.0 |
Megatron007/ghost | arch/arm/mach-msm/qdsp5/audio_fm.c | 1155 | 3605 | /* arch/arm/mach-msm/qdsp5/audio_fm.c
*
* pcm audio output device
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
* Copyright (c) 2011, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/msm_audio.h>
#include <mach/debug_mm.h>
#include "audmgr.h"
struct audio {
struct mutex lock;
int opened;
int enabled;
int running;
struct audmgr audmgr;
uint16_t volume;
};
static struct audio fm_audio;
/* must be called with audio->lock held */
static int audio_enable(struct audio *audio)
{
struct audmgr_config cfg;
int rc = 0;
MM_DBG("\n"); /* Macro prints the file name and function */
if (audio->enabled)
return 0;
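/* request a 48 kHz host-PCM session from the audio manager */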
cfg.tx_rate = RPC_AUD_DEF_SAMPLE_RATE_48000;
cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_48000;
cfg.def_method = RPC_AUD_DEF_METHOD_HOST_PCM;
cfg.codec = RPC_AUD_DEF_CODEC_PCM;
cfg.snd_method = RPC_SND_METHOD_VOICE;
rc = audmgr_enable(&audio->audmgr, &cfg);
if (rc < 0)
return rc;
audio->enabled = 1;
return rc;
}
/* must be called with audio->lock held */
static int audio_disable(struct audio *audio)
{
MM_DBG("\n"); /* Macro prints the file name and function */
if (audio->enabled) {
audio->enabled = 0;
audmgr_disable(&audio->audmgr);
}
return 0;
}
static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct audio *audio = file->private_data;
int rc = -EINVAL;
MM_DBG("cmd %d", cmd);
mutex_lock(&audio->lock);
switch (cmd) {
case AUDIO_START:
MM_DBG("AUDIO_START\n");
rc = audio_enable(audio);
break;
case AUDIO_STOP:
MM_DBG("AUDIO_STOP\n");
rc = audio_disable(audio);
audio->running = 0;
audio->enabled = 0;
break;
default:
rc = -EINVAL;
}
mutex_unlock(&audio->lock);
return rc;
}
static int audio_release(struct inode *inode, struct file *file)
{
struct audio *audio = file->private_data;
MM_DBG("audio instance 0x%08x freeing\n", (int)audio);
mutex_lock(&audio->lock);
audio_disable(audio);
audio->running = 0;
audio->enabled = 0;
audio->opened = 0;
mutex_unlock(&audio->lock);
return 0;
}
static int audio_open(struct inode *inode, struct file *file)
{
struct audio *audio = &fm_audio;
int rc = 0;
MM_DBG("\n"); /* Macro prints the file name and function */
mutex_lock(&audio->lock);
if (audio->opened) {
MM_ERR("busy\n");
rc = -EBUSY;
goto done;
}
rc = audmgr_open(&audio->audmgr);
if (rc) {
MM_ERR("%s: failed to register listnet\n", __func__);
goto done;
}
file->private_data = audio;
audio->opened = 1;
done:
mutex_unlock(&audio->lock);
return rc;
}
static const struct file_operations audio_fm_fops = {
.owner = THIS_MODULE,
.open = audio_open,
.release = audio_release,
.unlocked_ioctl = audio_ioctl,
};
struct miscdevice audio_fm_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "msm_fm",
.fops = &audio_fm_fops,
};
static int __init audio_init(void)
{
struct audio *audio = &fm_audio;
mutex_init(&audio->lock);
return misc_register(&audio_fm_misc);
}
device_initcall(audio_init);
MODULE_DESCRIPTION("MSM FM driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
NovaFusion/twrp_kernel | block/blk-map.c | 2179 | 8444 | /*
* Functions related to mapping data to requests
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h> /* for struct sg_iovec */
#include "blk.h"
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
struct bio *bio)
{
if (!rq->bio)
blk_rq_bio_prep(q, rq, bio);
else if (!ll_back_merge_fn(q, rq, bio))
return -EINVAL;
else {
rq->biotail->bi_next = bio;
rq->biotail = bio;
rq->__data_len += bio->bi_size;
}
return 0;
}
static int __blk_rq_unmap_user(struct bio *bio)
{
int ret = 0;
if (bio) {
if (bio_flagged(bio, BIO_USER_MAPPED))
bio_unmap_user(bio);
else
ret = bio_uncopy_user(bio);
}
return ret;
}
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data, void __user *ubuf,
unsigned int len, gfp_t gfp_mask)
{
unsigned long uaddr;
struct bio *bio, *orig_bio;
int reading, ret;
reading = rq_data_dir(rq) == READ;
/*
* if alignment requirement is satisfied, map in user pages for
* direct dma. else, set up kernel bounce buffers
*/
uaddr = (unsigned long) ubuf;
if (blk_rq_aligned(q, uaddr, len) && !map_data)
bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
else
bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
if (IS_ERR(bio))
return PTR_ERR(bio);
if (map_data && map_data->null_mapped)
bio->bi_flags |= (1 << BIO_NULL_MAPPED);
orig_bio = bio;
blk_queue_bounce(q, &bio);
/*
* We link the bounce buffer in and could have to traverse it
* later so we have to get a ref to prevent it from being freed
*/
bio_get(bio);
ret = blk_rq_append_bio(q, rq, bio);
if (!ret)
return bio->bi_size;
/* if it was bounced we must call the end io function */
bio_endio(bio, 0);
__blk_rq_unmap_user(orig_bio);
bio_put(bio);
return ret;
}
/**
* blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
* @q: request queue where request should be inserted
* @rq: request structure to fill
* @map_data: pointer to the rq_map_data holding pages (if necessary)
* @ubuf: the user buffer
* @len: length of user data
* @gfp_mask: memory allocation flags
*
* Description:
* Data will be mapped directly for zero copy I/O, if possible. Otherwise
* a kernel bounce buffer is used.
*
* A matching blk_rq_unmap_user() must be issued at the end of I/O, while
* still in process context.
*
* Note: The mapped bio may need to be bounced through blk_queue_bounce()
* before being submitted to the device, as pages mapped may be out of
* reach. It's the caller's responsibility to make sure this happens. The
* original bio must be passed back in to blk_rq_unmap_user() for proper
* unmapping.
*/
int blk_rq_map_user(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data, void __user *ubuf,
unsigned long len, gfp_t gfp_mask)
{
unsigned long bytes_read = 0;
struct bio *bio = NULL;
int ret;
if (len > (queue_max_hw_sectors(q) << 9))
return -EINVAL;
if (!len)
return -EINVAL;
if (!ubuf && (!map_data || !map_data->null_mapped))
return -EINVAL;
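/* map the user buffer in chunks of at most BIO_MAX_SIZE, appending one bio per iteration */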
while (bytes_read != len) {
unsigned long map_len, end, start;
map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
>> PAGE_SHIFT;
start = (unsigned long)ubuf >> PAGE_SHIFT;
/*
* A bad offset could cause us to require BIO_MAX_PAGES + 1
* pages. If this happens we just lower the requested
* mapping len by a page so that we can fit
*/
if (end - start > BIO_MAX_PAGES)
map_len -= PAGE_SIZE;
ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
gfp_mask);
if (ret < 0)
goto unmap_rq;
if (!bio)
bio = rq->bio;
bytes_read += ret;
ubuf += ret;
if (map_data)
map_data->offset += ret;
}
if (!bio_flagged(bio, BIO_USER_MAPPED))
rq->cmd_flags |= REQ_COPY_USER;
rq->buffer = NULL;
return 0;
unmap_rq:
blk_rq_unmap_user(bio);
rq->bio = NULL;
return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
/**
* blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
* @q: request queue where request should be inserted
* @rq: request to map data to
* @map_data: pointer to the rq_map_data holding pages (if necessary)
* @iov: pointer to the iovec
* @iov_count: number of elements in the iovec
* @len: I/O byte count
* @gfp_mask: memory allocation flags
*
* Description:
* Data will be mapped directly for zero copy I/O, if possible. Otherwise
* a kernel bounce buffer is used.
*
* A matching blk_rq_unmap_user() must be issued at the end of I/O, while
* still in process context.
*
* Note: The mapped bio may need to be bounced through blk_queue_bounce()
* before being submitted to the device, as pages mapped may be out of
* reach. It's the caller's responsibility to make sure this happens. The
* original bio must be passed back in to blk_rq_unmap_user() for proper
* unmapping.
*/
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data, struct sg_iovec *iov,
int iov_count, unsigned int len, gfp_t gfp_mask)
{
struct bio *bio;
int i, read = rq_data_dir(rq) == READ;
int unaligned = 0;
if (!iov || iov_count <= 0)
return -EINVAL;
for (i = 0; i < iov_count; i++) {
unsigned long uaddr = (unsigned long)iov[i].iov_base;
if (!iov[i].iov_len)
return -EINVAL;
/*
* Keep going so we check length of all segments
*/
if (uaddr & queue_dma_alignment(q))
unaligned = 1;
}
if (unaligned || (q->dma_pad_mask & len) || map_data)
bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
gfp_mask);
else
bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);
if (IS_ERR(bio))
return PTR_ERR(bio);
if (bio->bi_size != len) {
/*
* Grab an extra reference to this bio, as bio_unmap_user()
* expects to be able to drop it twice as it happens on the
* normal IO completion path
*/
bio_get(bio);
bio_endio(bio, 0);
__blk_rq_unmap_user(bio);
return -EINVAL;
}
if (!bio_flagged(bio, BIO_USER_MAPPED))
rq->cmd_flags |= REQ_COPY_USER;
blk_queue_bounce(q, &bio);
bio_get(bio);
blk_rq_bio_prep(q, rq, bio);
rq->buffer = NULL;
return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
/**
* blk_rq_unmap_user - unmap a request with user data
* @bio: start of bio list
*
* Description:
* Unmap a rq previously mapped by blk_rq_map_user(). The caller must
* supply the original rq->bio from the blk_rq_map_user() return, since
* the I/O completion may have changed rq->bio.
*/
int blk_rq_unmap_user(struct bio *bio)
{
struct bio *mapped_bio;
int ret = 0, ret2;
while (bio) {
mapped_bio = bio;
if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
mapped_bio = bio->bi_private;
ret2 = __blk_rq_unmap_user(mapped_bio);
if (ret2 && !ret)
ret = ret2;
mapped_bio = bio;
bio = bio->bi_next;
bio_put(mapped_bio);
}
return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
/**
* blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
* @q: request queue where request should be inserted
* @rq: request to fill
* @kbuf: the kernel buffer
* @len: length of user data
* @gfp_mask: memory allocation flags
*
* Description:
* Data will be mapped directly if possible. Otherwise a bounce
* buffer is used. Can be called multiple times to append multiple
* buffers.
*/
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
unsigned int len, gfp_t gfp_mask)
{
int reading = rq_data_dir(rq) == READ;
unsigned long addr = (unsigned long) kbuf;
int do_copy = 0;
struct bio *bio;
int ret;
if (len > (queue_max_hw_sectors(q) << 9))
return -EINVAL;
if (!len || !kbuf)
return -EINVAL;
do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
if (do_copy)
bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
else
bio = bio_map_kern(q, kbuf, len, gfp_mask);
if (IS_ERR(bio))
return PTR_ERR(bio);
if (rq_data_dir(rq) == WRITE)
bio->bi_rw |= REQ_WRITE;
if (do_copy)
rq->cmd_flags |= REQ_COPY_USER;
ret = blk_rq_append_bio(q, rq, bio);
if (unlikely(ret)) {
/* request is too big */
bio_put(bio);
return ret;
}
blk_queue_bounce(q, &rq->bio);
rq->buffer = NULL;
return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
| gpl-2.0 |
lbule/ALPS.L0.MP8.V2.1_LCSH6735_65C_HZ_L_KERNEL | drivers/s390/net/ctcm_mpc.c | 3459 | 59816 | /*
* Copyright IBM Corp. 2004, 2007
* Authors: Belinda Thompson (belindat@us.ibm.com)
* Andy Richter (richtera@us.ibm.com)
* Peter Tiedemann (ptiedem@de.ibm.com)
*/
/*
This module exports functions to be used by CCS:
EXPORT_SYMBOL(ctc_mpc_alloc_channel);
EXPORT_SYMBOL(ctc_mpc_establish_connectivity);
EXPORT_SYMBOL(ctc_mpc_dealloc_ch);
EXPORT_SYMBOL(ctc_mpc_flow_control);
*/
#undef DEBUG
#undef DEBUGDATA
#undef DEBUGCCW
#define KMSG_COMPONENT "ctcm"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <linux/io.h> /* instead of <asm/io.h> ok ? */
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <linux/bitops.h> /* instead of <asm/bitops.h> ok ? */
#include <linux/uaccess.h> /* instead of <asm/uaccess.h> ok ? */
#include <linux/wait.h>
#include <linux/moduleparam.h>
#include <asm/idals.h>
#include "ctcm_main.h"
#include "ctcm_mpc.h"
#include "ctcm_fsms.h"
static const struct xid2 init_xid = {
.xid2_type_id = XID_FM2,
.xid2_len = 0x45,
.xid2_adj_id = 0,
.xid2_rlen = 0x31,
.xid2_resv1 = 0,
.xid2_flag1 = 0,
.xid2_fmtt = 0,
.xid2_flag4 = 0x80,
.xid2_resv2 = 0,
.xid2_tgnum = 0,
.xid2_sender_id = 0,
.xid2_flag2 = 0,
.xid2_option = XID2_0,
.xid2_resv3 = "\x00",
.xid2_resv4 = 0,
.xid2_dlc_type = XID2_READ_SIDE,
.xid2_resv5 = 0,
.xid2_mpc_flag = 0,
.xid2_resv6 = 0,
.xid2_buf_len = (MPC_BUFSIZE_DEFAULT - 35),
};
static const struct th_header thnorm = {
.th_seg = 0x00,
.th_ch_flag = TH_IS_XID,
.th_blk_flag = TH_DATA_IS_XID,
.th_is_xid = 0x01,
.th_seq_num = 0x00000000,
};
static const struct th_header thdummy = {
.th_seg = 0x00,
.th_ch_flag = 0x00,
.th_blk_flag = TH_DATA_IS_XID,
.th_is_xid = 0x01,
.th_seq_num = 0x00000000,
};
/*
* Definition of one MPC group
*/
/*
* Compatibility macros for busy handling
* of network devices.
*/
static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb);
/*
* MPC Group state machine actions (static prototypes)
*/
static void mpc_action_nop(fsm_instance *fsm, int event, void *arg);
static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg);
static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg);
static void mpc_action_timeout(fsm_instance *fi, int event, void *arg);
static int mpc_validate_xid(struct mpcg_info *mpcginfo);
static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg);
static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg);
static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg);
static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg);
static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg);
static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg);
#ifdef DEBUGDATA
/*-------------------------------------------------------------------*
* Dump buffer format *
* *
*--------------------------------------------------------------------*/
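/* hex + ASCII dump, 16 bytes per row; runs of identical rows are collapsed into a "Duplicate as above" marker */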
void ctcmpc_dumpit(char *buf, int len)
{
__u32 ct, sw, rm, dup;
char *ptr, *rptr;
char tbuf[82], tdup[82];
#ifdef CONFIG_64BIT
char addr[22];
#else
char addr[12];
#endif
char boff[12];
char bhex[82], duphex[82];
char basc[40];
sw = 0;
rptr = ptr = buf;
rm = 16;
duphex[0] = 0x00;
dup = 0;
for (ct = 0; ct < len; ct++, ptr++, rptr++) {
if (sw == 0) {
#ifdef CONFIG_64BIT
sprintf(addr, "%16.16llx", (__u64)rptr);
#else
sprintf(addr, "%8.8X", (__u32)rptr);
#endif
sprintf(boff, "%4.4X", (__u32)ct);
bhex[0] = '\0';
basc[0] = '\0';
}
if ((sw == 4) || (sw == 12))
strcat(bhex, " ");
if (sw == 8)
strcat(bhex, " ");
#ifdef CONFIG_64BIT
sprintf(tbuf, "%2.2llX", (__u64)*ptr);
#else
sprintf(tbuf, "%2.2X", (__u32)*ptr);
#endif
tbuf[2] = '\0';
strcat(bhex, tbuf);
if ((0 != isprint(*ptr)) && (*ptr >= 0x20))
basc[sw] = *ptr;
else
basc[sw] = '.';
basc[sw+1] = '\0';
sw++;
rm--;
if (sw != 16)
continue;
if ((strcmp(duphex, bhex)) != 0) {
if (dup != 0) {
sprintf(tdup,
"Duplicate as above to %s", addr);
ctcm_pr_debug(" --- %s ---\n",
tdup);
}
ctcm_pr_debug(" %s (+%s) : %s [%s]\n",
addr, boff, bhex, basc);
dup = 0;
strcpy(duphex, bhex);
} else
dup++;
sw = 0;
rm = 16;
} /* endfor */
if (sw != 0) {
for ( ; rm > 0; rm--, sw++) {
if ((sw == 4) || (sw == 12))
strcat(bhex, " ");
if (sw == 8)
strcat(bhex, " ");
strcat(bhex, " ");
strcat(basc, " ");
}
if (dup != 0) {
sprintf(tdup, "Duplicate as above to %s", addr);
ctcm_pr_debug(" --- %s ---\n", tdup);
}
ctcm_pr_debug(" %s (+%s) : %s [%s]\n",
addr, boff, bhex, basc);
} else {
if (dup >= 1) {
sprintf(tdup, "Duplicate as above to %s", addr);
ctcm_pr_debug(" --- %s ---\n", tdup);
}
if (dup != 0) {
ctcm_pr_debug(" %s (+%s) : %s [%s]\n",
addr, boff, bhex, basc);
}
}
return;
} /* end of ctcmpc_dumpit */
#endif
#ifdef DEBUGDATA
/*
* Dump header and first 16 bytes of an sk_buff for debugging purposes.
*
* skb The sk_buff to dump.
* offset Offset relative to skb-data, where to start the dump.
*/
void ctcmpc_dump_skb(struct sk_buff *skb, int offset)
{
__u8 *p = skb->data;
struct th_header *header;
struct pdu *pheader;
int bl = skb->len;
int i;
if (p == NULL)
return;
p += offset;
header = (struct th_header *)p;
ctcm_pr_debug("dump:\n");
ctcm_pr_debug("skb len=%d \n", skb->len);
if (skb->len > 2) {
switch (header->th_ch_flag) {
case TH_HAS_PDU:
break;
case 0x00:
case TH_IS_XID:
if ((header->th_blk_flag == TH_DATA_IS_XID) &&
(header->th_is_xid == 0x01))
goto dumpth;
case TH_SWEEP_REQ:
goto dumpth;
case TH_SWEEP_RESP:
goto dumpth;
default:
break;
}
pheader = (struct pdu *)p;
ctcm_pr_debug("pdu->offset: %d hex: %04x\n",
pheader->pdu_offset, pheader->pdu_offset);
ctcm_pr_debug("pdu->flag : %02x\n", pheader->pdu_flag);
ctcm_pr_debug("pdu->proto : %02x\n", pheader->pdu_proto);
ctcm_pr_debug("pdu->seq : %02x\n", pheader->pdu_seq);
goto dumpdata;
dumpth:
ctcm_pr_debug("th->seg : %02x\n", header->th_seg);
ctcm_pr_debug("th->ch : %02x\n", header->th_ch_flag);
ctcm_pr_debug("th->blk_flag: %02x\n", header->th_blk_flag);
ctcm_pr_debug("th->type : %s\n",
(header->th_is_xid) ? "DATA" : "XID");
ctcm_pr_debug("th->seqnum : %04x\n", header->th_seq_num);
}
dumpdata:
if (bl > 32)
bl = 32;
ctcm_pr_debug("data: ");
for (i = 0; i < bl; i++)
ctcm_pr_debug("%02x%s", *p++, (i % 16) ? " " : "\n");
ctcm_pr_debug("\n");
}
#endif
static struct net_device *ctcmpc_get_dev(int port_num)
{
char device[20];
struct net_device *dev;
struct ctcm_priv *priv;
sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num);
dev = __dev_get_by_name(&init_net, device);
if (dev == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s: Device not found by name: %s",
CTCM_FUNTAIL, device);
return NULL;
}
priv = dev->ml_priv;
if (priv == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): dev->ml_priv is NULL",
CTCM_FUNTAIL, device);
return NULL;
}
if (priv->mpcg == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): priv->mpcg is NULL",
CTCM_FUNTAIL, device);
return NULL;
}
return dev;
}
/*
* ctc_mpc_alloc_channel
* (exported interface)
*
* Device Initialization :
* ACTPATH driven IO operations
*/
int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int))
{
struct net_device *dev;
struct mpc_group *grp;
struct ctcm_priv *priv;
dev = ctcmpc_get_dev(port_num);
if (dev == NULL)
return 1;
priv = dev->ml_priv;
grp = priv->mpcg;
grp->allochanfunc = callback;
grp->port_num = port_num;
grp->port_persist = 1;
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
"%s(%s): state=%s",
CTCM_FUNTAIL, dev->name, fsm_getstate_str(grp->fsm));
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_INOP:
/* Group is in the process of terminating */
grp->alloc_called = 1;
break;
case MPCG_STATE_RESET:
/* MPC Group will transition to state */
/* MPCG_STATE_XID2INITW iff the minimum number */
/* of 1 read and 1 write channel have successfully*/
/* activated */
/*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/
if (callback)
grp->send_qllc_disc = 1;
case MPCG_STATE_XID0IOWAIT:
fsm_deltimer(&grp->timer);
grp->outstanding_xid2 = 0;
grp->outstanding_xid7 = 0;
grp->outstanding_xid7_p2 = 0;
grp->saved_xid2 = NULL;
if (callback)
ctcm_open(dev);
fsm_event(priv->fsm, DEV_EVENT_START, dev);
break;
case MPCG_STATE_READY:
/* XID exchanges completed after PORT was activated */
/* Link station already active */
/* Maybe timing issue...retry callback */
grp->allocchan_callback_retries++;
if (grp->allocchan_callback_retries < 4) {
if (grp->allochanfunc)
grp->allochanfunc(grp->port_num,
grp->group_max_buflen);
} else {
/* there are problems...bail out */
/* there may be a state mismatch so restart */
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
grp->allocchan_callback_retries = 0;
}
break;
}
return 0;
}
EXPORT_SYMBOL(ctc_mpc_alloc_channel);
/*
* ctc_mpc_establish_connectivity
* (exported interface)
*/
void ctc_mpc_establish_connectivity(int port_num,
void (*callback)(int, int, int))
{
struct net_device *dev;
struct mpc_group *grp;
struct ctcm_priv *priv;
struct channel *rch, *wch;
dev = ctcmpc_get_dev(port_num);
if (dev == NULL)
return;
priv = dev->ml_priv;
grp = priv->mpcg;
rch = priv->channel[CTCM_READ];
wch = priv->channel[CTCM_WRITE];
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
"%s(%s): state=%s",
CTCM_FUNTAIL, dev->name, fsm_getstate_str(grp->fsm));
grp->estconnfunc = callback;
grp->port_num = port_num;
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_READY:
/* XID exchanges completed after PORT was activated */
/* Link station already active */
/* Maybe timing issue...retry callback */
fsm_deltimer(&grp->timer);
grp->estconn_callback_retries++;
if (grp->estconn_callback_retries < 4) {
if (grp->estconnfunc) {
grp->estconnfunc(grp->port_num, 0,
grp->group_max_buflen);
grp->estconnfunc = NULL;
}
} else {
/* there are problems...bail out */
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
grp->estconn_callback_retries = 0;
}
break;
case MPCG_STATE_INOP:
case MPCG_STATE_RESET:
/* MPC Group is not ready to start XID - min num of */
/* 1 read and 1 write channel have not been acquired*/
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): REJECTED - inactive channels",
CTCM_FUNTAIL, dev->name);
if (grp->estconnfunc) {
grp->estconnfunc(grp->port_num, -1, 0);
grp->estconnfunc = NULL;
}
break;
case MPCG_STATE_XID2INITW:
/* alloc channel was called but no XID exchange */
/* has occurred. initiate xside XID exchange */
/* make sure yside XID0 processing has not started */
if ((fsm_getstate(rch->fsm) > CH_XID0_PENDING) ||
(fsm_getstate(wch->fsm) > CH_XID0_PENDING)) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): ABORT - PASSIVE XID",
CTCM_FUNTAIL, dev->name);
break;
}
grp->send_qllc_disc = 1;
fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIT);
fsm_deltimer(&grp->timer);
fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
MPCG_EVENT_TIMER, dev);
grp->outstanding_xid7 = 0;
grp->outstanding_xid7_p2 = 0;
grp->saved_xid2 = NULL;
if ((rch->in_mpcgroup) &&
(fsm_getstate(rch->fsm) == CH_XID0_PENDING))
fsm_event(grp->fsm, MPCG_EVENT_XID0DO, rch);
else {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): RX-%s not ready for ACTIVE XID0",
CTCM_FUNTAIL, dev->name, rch->id);
if (grp->estconnfunc) {
grp->estconnfunc(grp->port_num, -1, 0);
grp->estconnfunc = NULL;
}
fsm_deltimer(&grp->timer);
goto done;
}
if ((wch->in_mpcgroup) &&
(fsm_getstate(wch->fsm) == CH_XID0_PENDING))
fsm_event(grp->fsm, MPCG_EVENT_XID0DO, wch);
else {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): WX-%s not ready for ACTIVE XID0",
CTCM_FUNTAIL, dev->name, wch->id);
if (grp->estconnfunc) {
grp->estconnfunc(grp->port_num, -1, 0);
grp->estconnfunc = NULL;
}
fsm_deltimer(&grp->timer);
goto done;
}
break;
case MPCG_STATE_XID0IOWAIT:
/* already in active XID negotiations */
default:
break;
}
done:
CTCM_PR_DEBUG("Exit %s()\n", __func__);
return;
}
EXPORT_SYMBOL(ctc_mpc_establish_connectivity);
/*
* ctc_mpc_dealloc_ch
* (exported interface)
*/
void ctc_mpc_dealloc_ch(int port_num)
{
struct net_device *dev;
struct ctcm_priv *priv;
struct mpc_group *grp;
dev = ctcmpc_get_dev(port_num);
if (dev == NULL)
return;
priv = dev->ml_priv;
grp = priv->mpcg;
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG,
"%s: %s: refcount = %d\n",
CTCM_FUNTAIL, dev->name, netdev_refcnt_read(dev));
fsm_deltimer(&priv->restart_timer);
grp->channels_terminating = 0;
fsm_deltimer(&grp->timer);
grp->allochanfunc = NULL;
grp->estconnfunc = NULL;
grp->port_persist = 0;
grp->send_qllc_disc = 0;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
ctcm_close(dev);
return;
}
EXPORT_SYMBOL(ctc_mpc_dealloc_ch);
/*
* ctc_mpc_flow_control
* (exported interface)
*/
void ctc_mpc_flow_control(int port_num, int flowc)
{
struct ctcm_priv *priv;
struct mpc_group *grp;
struct net_device *dev;
struct channel *rch;
int mpcg_state;
dev = ctcmpc_get_dev(port_num);
if (dev == NULL)
return;
priv = dev->ml_priv;
grp = priv->mpcg;
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
"%s: %s: flowc = %d",
CTCM_FUNTAIL, dev->name, flowc);
rch = priv->channel[CTCM_READ];
mpcg_state = fsm_getstate(grp->fsm);
switch (flowc) {
case 1:
if (mpcg_state == MPCG_STATE_FLOWC)
break;
if (mpcg_state == MPCG_STATE_READY) {
if (grp->flow_off_called == 1)
grp->flow_off_called = 0;
else
fsm_newstate(grp->fsm, MPCG_STATE_FLOWC);
break;
}
break;
case 0:
if (mpcg_state == MPCG_STATE_FLOWC) {
fsm_newstate(grp->fsm, MPCG_STATE_READY);
/* ensure any data that has accumulated */
/* on the io_queue will now be sent */
tasklet_schedule(&rch->ch_tasklet);
}
/* possible race condition */
if (mpcg_state == MPCG_STATE_READY) {
grp->flow_off_called = 1;
break;
}
break;
}
}
EXPORT_SYMBOL(ctc_mpc_flow_control);
static int mpc_send_qllc_discontact(struct net_device *);
/*
* helper function of ctcmpc_unpack_skb
*/
static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
{
struct channel *rch = mpcginfo->ch;
struct net_device *dev = rch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct channel *ch = priv->channel[CTCM_WRITE];
CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id);
CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
grp->sweep_rsp_pend_num--;
if ((grp->sweep_req_pend_num == 0) &&
(grp->sweep_rsp_pend_num == 0)) {
fsm_deltimer(&ch->sweep_timer);
grp->in_sweep = 0;
rch->th_seq_num = 0x00;
ch->th_seq_num = 0x00;
ctcm_clear_busy_do(dev);
}
kfree(mpcginfo);
return;
}
/*
* helper function of mpc_rcvd_sweep_req
* which is a helper of ctcmpc_unpack_skb
*/
static void ctcmpc_send_sweep_resp(struct channel *rch)
{
struct net_device *dev = rch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct th_sweep *header;
struct sk_buff *sweep_skb;
struct channel *ch = priv->channel[CTCM_WRITE];
CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id);
sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA);
if (sweep_skb == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): sweep_skb allocation ERROR\n",
CTCM_FUNTAIL, rch->id);
goto done;
}
header = kmalloc(sizeof(struct th_sweep), gfp_type());
if (!header) {
dev_kfree_skb_any(sweep_skb);
goto done;
}
header->th.th_seg = 0x00 ;
header->th.th_ch_flag = TH_SWEEP_RESP;
header->th.th_blk_flag = 0x00;
header->th.th_is_xid = 0x00;
header->th.th_seq_num = 0x00;
header->sw.th_last_seq = ch->th_seq_num;
memcpy(skb_put(sweep_skb, TH_SWEEP_LENGTH), header, TH_SWEEP_LENGTH);
kfree(header);
dev->trans_start = jiffies;
skb_queue_tail(&ch->sweep_queue, sweep_skb);
fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
return;
done:
grp->in_sweep = 0;
ctcm_clear_busy_do(dev);
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
return;
}
/*
* helper function of ctcmpc_unpack_skb
*/
static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
{
struct channel *rch = mpcginfo->ch;
struct net_device *dev = rch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct channel *ch = priv->channel[CTCM_WRITE];
if (do_debug)
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
" %s(): ch=0x%p id=%s\n", __func__, ch, ch->id);
if (grp->in_sweep == 0) {
grp->in_sweep = 1;
ctcm_test_and_set_busy(dev);
grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
}
CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
grp->sweep_req_pend_num--;
ctcmpc_send_sweep_resp(ch);
kfree(mpcginfo);
return;
}
/*
* MPC Group Station FSM definitions
*/
static const char *mpcg_event_names[] = {
[MPCG_EVENT_INOP] = "INOP Condition",
[MPCG_EVENT_DISCONC] = "Discontact Received",
[MPCG_EVENT_XID0DO] = "Channel Active - Start XID",
[MPCG_EVENT_XID2] = "XID2 Received",
[MPCG_EVENT_XID2DONE] = "XID0 Complete",
[MPCG_EVENT_XID7DONE] = "XID7 Complete",
[MPCG_EVENT_TIMER] = "XID Setup Timer",
[MPCG_EVENT_DOIO] = "XID DoIO",
};
static const char *mpcg_state_names[] = {
[MPCG_STATE_RESET] = "Reset",
[MPCG_STATE_INOP] = "INOP",
[MPCG_STATE_XID2INITW] = "Passive XID - XID0 Pending Start",
[MPCG_STATE_XID2INITX] = "Passive XID - XID0 Pending Complete",
[MPCG_STATE_XID7INITW] = "Passive XID - XID7 Pending P1 Start",
[MPCG_STATE_XID7INITX] = "Passive XID - XID7 Pending P2 Complete",
[MPCG_STATE_XID0IOWAIT] = "Active XID - XID0 Pending Start",
[MPCG_STATE_XID0IOWAIX] = "Active XID - XID0 Pending Complete",
[MPCG_STATE_XID7INITI] = "Active XID - XID7 Pending Start",
[MPCG_STATE_XID7INITZ] = "Active XID - XID7 Pending Complete",
[MPCG_STATE_XID7INITF] = "XID - XID7 Complete",
[MPCG_STATE_FLOWC] = "FLOW CONTROL ON",
[MPCG_STATE_READY] = "READY",
};
/*
* The MPC Group Station FSM
* 22 events
*/
static const fsm_node mpcg_fsm[] = {
{ MPCG_STATE_RESET, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_INOP, MPCG_EVENT_INOP, mpc_action_nop },
{ MPCG_STATE_FLOWC, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_READY, MPCG_EVENT_DISCONC, mpc_action_discontact },
{ MPCG_STATE_READY, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID2INITW, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
{ MPCG_STATE_XID2INITW, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
{ MPCG_STATE_XID2INITW, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID2INITW, MPCG_EVENT_TIMER, mpc_action_timeout },
{ MPCG_STATE_XID2INITW, MPCG_EVENT_DOIO, mpc_action_yside_xid },
{ MPCG_STATE_XID2INITX, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
{ MPCG_STATE_XID2INITX, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
{ MPCG_STATE_XID2INITX, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID2INITX, MPCG_EVENT_TIMER, mpc_action_timeout },
{ MPCG_STATE_XID2INITX, MPCG_EVENT_DOIO, mpc_action_yside_xid },
{ MPCG_STATE_XID7INITW, MPCG_EVENT_XID2DONE, mpc_action_doxid7 },
{ MPCG_STATE_XID7INITW, MPCG_EVENT_DISCONC, mpc_action_discontact },
{ MPCG_STATE_XID7INITW, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
{ MPCG_STATE_XID7INITW, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID7INITW, MPCG_EVENT_TIMER, mpc_action_timeout },
{ MPCG_STATE_XID7INITW, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
{ MPCG_STATE_XID7INITW, MPCG_EVENT_DOIO, mpc_action_yside_xid },
{ MPCG_STATE_XID7INITX, MPCG_EVENT_DISCONC, mpc_action_discontact },
{ MPCG_STATE_XID7INITX, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
{ MPCG_STATE_XID7INITX, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID7INITX, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
{ MPCG_STATE_XID7INITX, MPCG_EVENT_TIMER, mpc_action_timeout },
{ MPCG_STATE_XID7INITX, MPCG_EVENT_DOIO, mpc_action_yside_xid },
{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DISCONC, mpc_action_discontact },
{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_TIMER, mpc_action_timeout },
{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DOIO, mpc_action_xside_xid },
{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DISCONC, mpc_action_discontact },
{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_TIMER, mpc_action_timeout },
{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DOIO, mpc_action_xside_xid },
{ MPCG_STATE_XID7INITI, MPCG_EVENT_XID2DONE, mpc_action_doxid7 },
{ MPCG_STATE_XID7INITI, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
{ MPCG_STATE_XID7INITI, MPCG_EVENT_DISCONC, mpc_action_discontact },
{ MPCG_STATE_XID7INITI, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID7INITI, MPCG_EVENT_TIMER, mpc_action_timeout },
{ MPCG_STATE_XID7INITI, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
{ MPCG_STATE_XID7INITI, MPCG_EVENT_DOIO, mpc_action_xside_xid },
{ MPCG_STATE_XID7INITZ, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
{ MPCG_STATE_XID7INITZ, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
{ MPCG_STATE_XID7INITZ, MPCG_EVENT_DISCONC, mpc_action_discontact },
{ MPCG_STATE_XID7INITZ, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID7INITZ, MPCG_EVENT_TIMER, mpc_action_timeout },
{ MPCG_STATE_XID7INITZ, MPCG_EVENT_DOIO, mpc_action_xside_xid },
{ MPCG_STATE_XID7INITF, MPCG_EVENT_INOP, mpc_action_go_inop },
{ MPCG_STATE_XID7INITF, MPCG_EVENT_XID7DONE, mpc_action_go_ready },
};
static int mpcg_fsm_len = ARRAY_SIZE(mpcg_fsm);
/*
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg)
{
struct net_device *dev = arg;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
if (grp == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): No MPC group",
CTCM_FUNTAIL, dev->name);
return;
}
fsm_deltimer(&grp->timer);
if (grp->saved_xid2->xid2_flag2 == 0x40) {
priv->xid->xid2_flag2 = 0x00;
if (grp->estconnfunc) {
grp->estconnfunc(grp->port_num, 1,
grp->group_max_buflen);
grp->estconnfunc = NULL;
} else if (grp->allochanfunc)
grp->send_qllc_disc = 1;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): fails",
CTCM_FUNTAIL, dev->name);
return;
}
grp->port_persist = 1;
grp->out_of_sequence = 0;
grp->estconn_called = 0;
tasklet_hi_schedule(&grp->mpc_tasklet2);
return;
}
/*
* helper of ctcm_init_netdevice
* CTCM_PROTO_MPC only
*/
void mpc_group_ready(unsigned long adev)
{
struct net_device *dev = (struct net_device *)adev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct channel *ch = NULL;
if (grp == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): No MPC group",
CTCM_FUNTAIL, dev->name);
return;
}
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE,
"%s: %s: GROUP TRANSITIONED TO READY, maxbuf = %d\n",
CTCM_FUNTAIL, dev->name, grp->group_max_buflen);
fsm_newstate(grp->fsm, MPCG_STATE_READY);
/* Put up a read on the channel */
ch = priv->channel[CTCM_READ];
ch->pdu_seq = 0;
CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" ,
__func__, ch->pdu_seq);
ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch);
/* Put the write channel in idle state */
ch = priv->channel[CTCM_WRITE];
if (ch->collect_len > 0) {
spin_lock(&ch->collect_lock);
ctcm_purge_skb_queue(&ch->collect_queue);
ch->collect_len = 0;
spin_unlock(&ch->collect_lock);
}
ctcm_chx_txidle(ch->fsm, CTC_EVENT_START, ch);
ctcm_clear_busy(dev);
if (grp->estconnfunc) {
grp->estconnfunc(grp->port_num, 0,
grp->group_max_buflen);
grp->estconnfunc = NULL;
} else if (grp->allochanfunc)
grp->allochanfunc(grp->port_num, grp->group_max_buflen);
grp->send_qllc_disc = 1;
grp->changed_side = 0;
return;
}
/*
* Increment the MPC Group Active Channel Counts
* helper of dev_action (called from channel fsm)
*/
void mpc_channel_action(struct channel *ch, int direction, int action)
{
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
if (grp == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): No MPC group",
CTCM_FUNTAIL, dev->name);
return;
}
CTCM_PR_DEBUG("enter %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
"%s: %i / Grp:%s total_channels=%i, active_channels: "
"read=%i, write=%i\n", __func__, action,
fsm_getstate_str(grp->fsm), grp->num_channel_paths,
grp->active_channels[CTCM_READ],
grp->active_channels[CTCM_WRITE]);
if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) {
grp->num_channel_paths++;
grp->active_channels[direction]++;
grp->outstanding_xid2++;
ch->in_mpcgroup = 1;
if (ch->xid_skb != NULL)
dev_kfree_skb_any(ch->xid_skb);
ch->xid_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT,
GFP_ATOMIC | GFP_DMA);
if (ch->xid_skb == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): Couldn't alloc ch xid_skb\n",
CTCM_FUNTAIL, dev->name);
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
return;
}
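/*
 * Note where the TH header, the XID2 and the 4-byte id will live in
 * the (still empty) buffer, then rewind the skb and copy in the
 * group's base XID as the channel's initial XID content.
 */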
ch->xid_skb_data = ch->xid_skb->data;
ch->xid_th = (struct th_header *)ch->xid_skb->data;
skb_put(ch->xid_skb, TH_HEADER_LENGTH);
ch->xid = (struct xid2 *)skb_tail_pointer(ch->xid_skb);
skb_put(ch->xid_skb, XID2_LENGTH);
ch->xid_id = skb_tail_pointer(ch->xid_skb);
ch->xid_skb->data = ch->xid_skb_data;
skb_reset_tail_pointer(ch->xid_skb);
ch->xid_skb->len = 0;
memcpy(skb_put(ch->xid_skb, grp->xid_skb->len),
grp->xid_skb->data,
grp->xid_skb->len);
ch->xid->xid2_dlc_type =
((CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
? XID2_READ_SIDE : XID2_WRITE_SIDE);
if (CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE)
ch->xid->xid2_buf_len = 0x00;
ch->xid_skb->data = ch->xid_skb_data;
skb_reset_tail_pointer(ch->xid_skb);
ch->xid_skb->len = 0;
fsm_newstate(ch->fsm, CH_XID0_PENDING);
if ((grp->active_channels[CTCM_READ] > 0) &&
(grp->active_channels[CTCM_WRITE] > 0) &&
(fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) {
fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE,
"%s: %s: MPC GROUP CHANNELS ACTIVE\n",
__func__, dev->name);
}
} else if ((action == MPC_CHANNEL_REMOVE) &&
(ch->in_mpcgroup == 1)) {
ch->in_mpcgroup = 0;
grp->num_channel_paths--;
grp->active_channels[direction]--;
if (ch->xid_skb != NULL)
dev_kfree_skb_any(ch->xid_skb);
ch->xid_skb = NULL;
if (grp->channels_terminating)
goto done;
if (((grp->active_channels[CTCM_READ] == 0) &&
(grp->active_channels[CTCM_WRITE] > 0))
|| ((grp->active_channels[CTCM_WRITE] == 0) &&
(grp->active_channels[CTCM_READ] > 0)))
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
}
done:
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
"exit %s: %i / Grp:%s total_channels=%i, active_channels: "
"read=%i, write=%i\n", __func__, action,
fsm_getstate_str(grp->fsm), grp->num_channel_paths,
grp->active_channels[CTCM_READ],
grp->active_channels[CTCM_WRITE]);
CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
}
/**
* Unpack a just-received skb and hand it over to
* the upper layers.
* Special MPC version of unpack_skb.
*
* ch The channel where this skb has been received.
* pskb The received skb.
*/
static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
{
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct pdu *curr_pdu;
struct mpcg_info *mpcginfo;
struct th_header *header = NULL;
struct th_sweep *sweep = NULL;
int pdu_last_seen = 0;
__u32 new_len;
struct sk_buff *skb;
int skblen;
int sendrc = 0;
CTCM_PR_DEBUG("ctcmpc enter: %s() %s cp:%i ch:%s\n",
__func__, dev->name, smp_processor_id(), ch->id);
header = (struct th_header *)pskb->data;
if ((header->th_seg == 0) &&
(header->th_ch_flag == 0) &&
(header->th_blk_flag == 0) &&
(header->th_seq_num == 0))
/* nothing for us */
goto done;
CTCM_PR_DBGDATA("%s: th_header\n", __func__);
CTCM_D3_DUMP((char *)header, TH_HEADER_LENGTH);
CTCM_PR_DBGDATA("%s: pskb len: %04x \n", __func__, pskb->len);
pskb->dev = dev;
pskb->ip_summed = CHECKSUM_UNNECESSARY;
skb_pull(pskb, TH_HEADER_LENGTH);
if (likely(header->th_ch_flag == TH_HAS_PDU)) {
CTCM_PR_DBGDATA("%s: came into th_has_pdu\n", __func__);
if ((fsm_getstate(grp->fsm) == MPCG_STATE_FLOWC) ||
((fsm_getstate(grp->fsm) == MPCG_STATE_READY) &&
(header->th_seq_num != ch->th_seq_num + 1) &&
(ch->th_seq_num != 0))) {
/* This is NOT the next segment *
* we are not the correct race winner *
* go away and let someone else win *
* BUT..this only applies if xid negot *
* is done *
*/
grp->out_of_sequence += 1;
__skb_push(pskb, TH_HEADER_LENGTH);
skb_queue_tail(&ch->io_queue, pskb);
CTCM_PR_DBGDATA("%s: th_seq_num expect:%08x "
"got:%08x\n", __func__,
ch->th_seq_num + 1, header->th_seq_num);
return;
}
grp->out_of_sequence = 0;
ch->th_seq_num = header->th_seq_num;
CTCM_PR_DBGDATA("ctcmpc: %s() FromVTAM_th_seq=%08x\n",
__func__, ch->th_seq_num);
if (unlikely(fsm_getstate(grp->fsm) != MPCG_STATE_READY))
goto done;
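/*
 * Walk the PDUs packed into this transport frame: strip each PDU
 * header, copy the payload (pdu_offset bytes) into a fresh skb
 * prefixed with the 4-byte pdu_seq counter, and pass it to
 * netif_rx() until the PDU_LAST flag is seen.
 */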
while ((pskb->len > 0) && !pdu_last_seen) {
curr_pdu = (struct pdu *)pskb->data;
CTCM_PR_DBGDATA("%s: pdu_header\n", __func__);
CTCM_D3_DUMP((char *)pskb->data, PDU_HEADER_LENGTH);
CTCM_PR_DBGDATA("%s: pskb len: %04x \n",
__func__, pskb->len);
skb_pull(pskb, PDU_HEADER_LENGTH);
if (curr_pdu->pdu_flag & PDU_LAST)
pdu_last_seen = 1;
if (curr_pdu->pdu_flag & PDU_CNTL)
pskb->protocol = htons(ETH_P_SNAP);
else
pskb->protocol = htons(ETH_P_SNA_DIX);
if ((pskb->len <= 0) || (pskb->len > ch->max_bufsize)) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): Dropping packet with "
"illegal siize %d",
CTCM_FUNTAIL, dev->name, pskb->len);
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
goto done;
}
skb_reset_mac_header(pskb);
new_len = curr_pdu->pdu_offset;
CTCM_PR_DBGDATA("%s: new_len: %04x \n",
__func__, new_len);
if ((new_len == 0) || (new_len > pskb->len)) {
/* should never happen */
/* pskb len must be hosed...bail out */
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): non valid pdu_offset: %04x",
/* "data may be lost", */
CTCM_FUNTAIL, dev->name, new_len);
goto done;
}
skb = __dev_alloc_skb(new_len+4, GFP_ATOMIC);
if (!skb) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): MEMORY allocation error",
CTCM_FUNTAIL, dev->name);
priv->stats.rx_dropped++;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
goto done;
}
memcpy(skb_put(skb, new_len), pskb->data, new_len);
skb_reset_mac_header(skb);
skb->dev = pskb->dev;
skb->protocol = pskb->protocol;
skb->ip_summed = CHECKSUM_UNNECESSARY;
*((__u32 *) skb_push(skb, 4)) = ch->pdu_seq;
ch->pdu_seq++;
if (do_debug_data) {
ctcm_pr_debug("%s: ToDCM_pdu_seq= %08x\n",
__func__, ch->pdu_seq);
ctcm_pr_debug("%s: skb:%0lx "
"skb len: %d \n", __func__,
(unsigned long)skb, skb->len);
ctcm_pr_debug("%s: up to 32 bytes "
"of pdu_data sent\n", __func__);
ctcmpc_dump32((char *)skb->data, skb->len);
}
skblen = skb->len;
sendrc = netif_rx(skb);
priv->stats.rx_packets++;
priv->stats.rx_bytes += skblen;
skb_pull(pskb, new_len); /* point to next PDU */
}
} else {
mpcginfo = kmalloc(sizeof(struct mpcg_info), gfp_type());
if (mpcginfo == NULL)
goto done;
mpcginfo->ch = ch;
mpcginfo->th = header;
mpcginfo->skb = pskb;
CTCM_PR_DEBUG("%s: Not PDU - may be control pkt\n",
__func__);
/* it's a sweep? */
sweep = (struct th_sweep *)pskb->data;
mpcginfo->sweep = sweep;
if (header->th_ch_flag == TH_SWEEP_REQ)
mpc_rcvd_sweep_req(mpcginfo);
else if (header->th_ch_flag == TH_SWEEP_RESP)
mpc_rcvd_sweep_resp(mpcginfo);
else if (header->th_blk_flag == TH_DATA_IS_XID) {
struct xid2 *thisxid = (struct xid2 *)pskb->data;
skb_pull(pskb, XID2_LENGTH);
mpcginfo->xid = thisxid;
fsm_event(grp->fsm, MPCG_EVENT_XID2, mpcginfo);
} else if (header->th_blk_flag == TH_DISCONTACT)
fsm_event(grp->fsm, MPCG_EVENT_DISCONC, mpcginfo);
else if (header->th_seq_num != 0) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): control pkt expected\n",
CTCM_FUNTAIL, dev->name);
priv->stats.rx_dropped++;
/* mpcginfo only used for non-data transfers */
kfree(mpcginfo);
if (do_debug_data)
ctcmpc_dump_skb(pskb, -8);
}
}
done:
dev_kfree_skb_any(pskb);
if (sendrc == NET_RX_DROP) {
dev_warn(&dev->dev,
"The network backlog for %s is exceeded, "
"package dropped\n", __func__);
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
}
CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n",
__func__, dev->name, ch, ch->id);
}
/**
* tasklet helper for mpc's skb unpacking.
*
* ch The channel to work on.
* Allow flow control back pressure to occur here.
* Throttling the channel back can result in excessive
* channel inactivity and system deactivation of the channel.
*/
void ctcmpc_bh(unsigned long thischan)
{
struct channel *ch = (struct channel *)thischan;
struct sk_buff *skb;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("%s cp:%i enter: %s() %s\n",
dev->name, smp_processor_id(), __func__, ch->id);
/* caller has requested driver to throttle back */
while ((fsm_getstate(grp->fsm) != MPCG_STATE_FLOWC) &&
(skb = skb_dequeue(&ch->io_queue))) {
ctcmpc_unpack_skb(ch, skb);
if (grp->out_of_sequence > 20) {
/* assume data loss has occurred if */
/* missing seq_num for extended */
/* period of time */
grp->out_of_sequence = 0;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
break;
}
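/*
 * ctcmpc_unpack_skb() requeues an out-of-sequence skb at the tail;
 * if the skb just processed is back at the head of the queue, stop
 * here rather than spin on it.
 */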
if (skb == skb_peek(&ch->io_queue))
break;
}
CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n",
__func__, dev->name, ch, ch->id);
return;
}
/*
* MPC Group Initializations
*/
struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv)
{
struct mpc_group *grp;
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
"Enter %s(%p)", CTCM_FUNTAIL, priv);
grp = kzalloc(sizeof(struct mpc_group), GFP_KERNEL);
if (grp == NULL)
return NULL;
grp->fsm = init_fsm("mpcg", mpcg_state_names, mpcg_event_names,
MPCG_NR_STATES, MPCG_NR_EVENTS, mpcg_fsm,
mpcg_fsm_len, GFP_KERNEL);
if (grp->fsm == NULL) {
kfree(grp);
return NULL;
}
fsm_newstate(grp->fsm, MPCG_STATE_RESET);
fsm_settimer(grp->fsm, &grp->timer);
grp->xid_skb =
__dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA);
if (grp->xid_skb == NULL) {
kfree_fsm(grp->fsm);
kfree(grp);
return NULL;
}
/* base xid for all channels in group */
grp->xid_skb_data = grp->xid_skb->data;
grp->xid_th = (struct th_header *)grp->xid_skb->data;
memcpy(skb_put(grp->xid_skb, TH_HEADER_LENGTH),
&thnorm, TH_HEADER_LENGTH);
grp->xid = (struct xid2 *)skb_tail_pointer(grp->xid_skb);
memcpy(skb_put(grp->xid_skb, XID2_LENGTH), &init_xid, XID2_LENGTH);
grp->xid->xid2_adj_id = jiffies | 0xfff00000;
grp->xid->xid2_sender_id = jiffies;
grp->xid_id = skb_tail_pointer(grp->xid_skb);
memcpy(skb_put(grp->xid_skb, 4), "VTAM", 4);
grp->rcvd_xid_skb =
__dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
if (grp->rcvd_xid_skb == NULL) {
kfree_fsm(grp->fsm);
dev_kfree_skb(grp->xid_skb);
kfree(grp);
return NULL;
}
grp->rcvd_xid_data = grp->rcvd_xid_skb->data;
grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data;
memcpy(skb_put(grp->rcvd_xid_skb, TH_HEADER_LENGTH),
&thnorm, TH_HEADER_LENGTH);
grp->saved_xid2 = NULL;
priv->xid = grp->xid;
priv->mpcg = grp;
return grp;
}
/*
* The MPC Group Station FSM
*/
/*
* MPC Group Station FSM actions
* CTCM_PROTO_MPC only
*/
/**
* NOP action for statemachines
*/
static void mpc_action_nop(fsm_instance *fi, int event, void *arg)
{
}
/*
* Invoked when the device transitions to dev_stopped.
* MPC will stop each individual channel if a single XID failure
* occurs, or will initiate stopping of all channels if a group-
* level failure occurs.
*/
static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
{
struct net_device *dev = arg;
struct ctcm_priv *priv;
struct mpc_group *grp;
struct channel *wch;
CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name);
priv = dev->ml_priv;
grp = priv->mpcg;
grp->flow_off_called = 0;
fsm_deltimer(&grp->timer);
if (grp->channels_terminating)
return;
grp->channels_terminating = 1;
grp->saved_state = fsm_getstate(grp->fsm);
fsm_newstate(grp->fsm, MPCG_STATE_INOP);
if (grp->saved_state > MPCG_STATE_XID7INITF)
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
"%s(%s): MPC GROUP INOPERATIVE",
CTCM_FUNTAIL, dev->name);
if ((grp->saved_state != MPCG_STATE_RESET) ||
/* dealloc_channel has been called */
(grp->port_persist == 0))
fsm_deltimer(&priv->restart_timer);
wch = priv->channel[CTCM_WRITE];
switch (grp->saved_state) {
case MPCG_STATE_RESET:
case MPCG_STATE_INOP:
case MPCG_STATE_XID2INITW:
case MPCG_STATE_XID0IOWAIT:
case MPCG_STATE_XID2INITX:
case MPCG_STATE_XID7INITW:
case MPCG_STATE_XID7INITX:
case MPCG_STATE_XID0IOWAIX:
case MPCG_STATE_XID7INITI:
case MPCG_STATE_XID7INITZ:
case MPCG_STATE_XID7INITF:
break;
case MPCG_STATE_FLOWC:
case MPCG_STATE_READY:
default:
tasklet_hi_schedule(&wch->ch_disc_tasklet);
}
grp->xid2_tgnum = 0;
grp->group_max_buflen = 0; /* min of all received */
grp->outstanding_xid2 = 0;
grp->outstanding_xid7 = 0;
grp->outstanding_xid7_p2 = 0;
grp->saved_xid2 = NULL;
grp->xidnogood = 0;
grp->changed_side = 0;
grp->rcvd_xid_skb->data = grp->rcvd_xid_data;
skb_reset_tail_pointer(grp->rcvd_xid_skb);
grp->rcvd_xid_skb->len = 0;
grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data;
memcpy(skb_put(grp->rcvd_xid_skb, TH_HEADER_LENGTH), &thnorm,
TH_HEADER_LENGTH);
if (grp->send_qllc_disc == 1) {
grp->send_qllc_disc = 0;
mpc_send_qllc_discontact(dev);
}
/* DO NOT issue DEV_EVENT_STOP directly out of this code */
/* This can result in INOP of VTAM PU due to halting of */
/* outstanding IO which causes a sense to be returned */
/* Only about 3 senses are allowed and then IOS/VTAM will */
/* become unreachable without manual intervention */
if ((grp->port_persist == 1) || (grp->alloc_called)) {
grp->alloc_called = 0;
fsm_deltimer(&priv->restart_timer);
fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_RESTART, dev);
fsm_newstate(grp->fsm, MPCG_STATE_RESET);
if (grp->saved_state > MPCG_STATE_XID7INITF)
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS,
"%s(%s): MPC GROUP RECOVERY SCHEDULED",
CTCM_FUNTAIL, dev->name);
} else {
fsm_deltimer(&priv->restart_timer);
fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_STOP, dev);
fsm_newstate(grp->fsm, MPCG_STATE_RESET);
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS,
"%s(%s): NO MPC GROUP RECOVERY ATTEMPTED",
CTCM_FUNTAIL, dev->name);
}
}
/**
* Handle mpc group action timeout.
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*
* fi An instance of an mpc_group fsm.
* event The event, just happened.
* arg Generic pointer, cast from net_device * upon call.
*/
static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
{
struct net_device *dev = arg;
struct ctcm_priv *priv;
struct mpc_group *grp;
struct channel *wch;
struct channel *rch;
priv = dev->ml_priv;
grp = priv->mpcg;
wch = priv->channel[CTCM_WRITE];
rch = priv->channel[CTCM_READ];
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_XID2INITW:
/* Unless there is outstanding IO on the */
/* channel just return and wait for ATTN */
/* interrupt to begin XID negotiations */
if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) &&
(fsm_getstate(wch->fsm) == CH_XID0_PENDING))
break;
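/* fall through - XID negotiation already under way, so the
 * timeout means it stalled: declare the group inoperative */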
default:
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
}
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
"%s: dev=%s exit",
CTCM_FUNTAIL, dev->name);
return;
}
/*
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
void mpc_action_discontact(fsm_instance *fi, int event, void *arg)
{
struct mpcg_info *mpcginfo = arg;
struct channel *ch = mpcginfo->ch;
struct net_device *dev;
struct ctcm_priv *priv;
struct mpc_group *grp;
if (ch) {
dev = ch->netdev;
if (dev) {
priv = dev->ml_priv;
if (priv) {
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
"%s: %s: %s\n",
CTCM_FUNTAIL, dev->name, ch->id);
grp = priv->mpcg;
grp->send_qllc_disc = 1;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
}
}
}
return;
}
/*
* MPC Group Station - not part of FSM
* CTCM_PROTO_MPC only
* called from add_channel in ctcm_main.c
*/
void mpc_action_send_discontact(unsigned long thischan)
{
int rc;
struct channel *ch = (struct channel *)thischan;
unsigned long saveflags = 0;
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
rc = ccw_device_start(ch->cdev, &ch->ccw[15],
(unsigned long)ch, 0xff, 0);
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
if (rc != 0) {
ctcm_ccw_check_rc(ch, rc, (char *)__func__);
}
return;
}
/*
* helper function of mpc FSM
* CTCM_PROTO_MPC only
* mpc_action_rcvd_xid7
*/
static int mpc_validate_xid(struct mpcg_info *mpcginfo)
{
struct channel *ch = mpcginfo->ch;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct xid2 *xid = mpcginfo->xid;
int rc = 0;
__u64 our_id = 0;
__u64 their_id = 0;
int len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
CTCM_PR_DEBUG("Enter %s: xid=%p\n", __func__, xid);
if (xid == NULL) {
rc = 1;
/* XID REJECTED: xid == NULL */
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): xid = NULL",
CTCM_FUNTAIL, ch->id);
goto done;
}
CTCM_D3_DUMP((char *)xid, XID2_LENGTH);
/* the received direction should be the opposite of ours */
if (((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? XID2_WRITE_SIDE :
XID2_READ_SIDE) != xid->xid2_dlc_type) {
rc = 2;
/* XID REJECTED: r/w channel pairing mismatch */
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): r/w channel pairing mismatch",
CTCM_FUNTAIL, ch->id);
goto done;
}
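/*
 * The group buffer size is negotiated down to the smallest
 * read-side XID2 buffer length, less the TH + PDU header overhead.
 */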
if (xid->xid2_dlc_type == XID2_READ_SIDE) {
CTCM_PR_DEBUG("%s: grpmaxbuf:%d xid2buflen:%d\n", __func__,
grp->group_max_buflen, xid->xid2_buf_len);
if (grp->group_max_buflen == 0 || grp->group_max_buflen >
xid->xid2_buf_len - len)
grp->group_max_buflen = xid->xid2_buf_len - len;
}
if (grp->saved_xid2 == NULL) {
grp->saved_xid2 =
(struct xid2 *)skb_tail_pointer(grp->rcvd_xid_skb);
memcpy(skb_put(grp->rcvd_xid_skb,
XID2_LENGTH), xid, XID2_LENGTH);
grp->rcvd_xid_skb->data = grp->rcvd_xid_data;
skb_reset_tail_pointer(grp->rcvd_xid_skb);
grp->rcvd_xid_skb->len = 0;
/* convert two 32 bit numbers into 1 64 bit for id compare */
our_id = (__u64)priv->xid->xid2_adj_id;
our_id = our_id << 32;
our_id = our_id + priv->xid->xid2_sender_id;
their_id = (__u64)xid->xid2_adj_id;
their_id = their_id << 32;
their_id = their_id + xid->xid2_sender_id;
/* lower id assume the xside role */
if (our_id < their_id) {
grp->roll = XSIDE;
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
"%s(%s): WE HAVE LOW ID - TAKE XSIDE",
CTCM_FUNTAIL, ch->id);
} else {
grp->roll = YSIDE;
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
"%s(%s): WE HAVE HIGH ID - TAKE YSIDE",
CTCM_FUNTAIL, ch->id);
}
} else {
if (xid->xid2_flag4 != grp->saved_xid2->xid2_flag4) {
rc = 3;
/* XID REJECTED: xid flag byte4 mismatch */
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): xid flag byte4 mismatch",
CTCM_FUNTAIL, ch->id);
}
if (xid->xid2_flag2 == 0x40) {
rc = 4;
/* XID REJECTED - xid NOGOOD */
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): xid NOGOOD",
CTCM_FUNTAIL, ch->id);
}
if (xid->xid2_adj_id != grp->saved_xid2->xid2_adj_id) {
rc = 5;
/* XID REJECTED - Adjacent Station ID Mismatch */
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): Adjacent Station ID Mismatch",
CTCM_FUNTAIL, ch->id);
}
if (xid->xid2_sender_id != grp->saved_xid2->xid2_sender_id) {
rc = 6;
/* XID REJECTED - Sender Address Mismatch */
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): Sender Address Mismatch",
CTCM_FUNTAIL, ch->id);
}
}
done:
if (rc) {
dev_warn(&dev->dev,
"The XID used in the MPC protocol is not valid, "
"rc = %d\n", rc);
priv->xid->xid2_flag2 = 0x40;
grp->saved_xid2->xid2_flag2 = 0x40;
}
return rc;
}
/*
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side)
{
struct channel *ch = arg;
int rc = 0;
int gotlock = 0;
unsigned long saveflags = 0; /* avoids compiler warning with
spin_unlock_irqrestore */
CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
__func__, smp_processor_id(), ch, ch->id);
if (ctcm_checkalloc_buffer(ch))
goto done;
/*
* skb data-buffer referencing:
*/
ch->trans_skb->data = ch->trans_skb_data;
skb_reset_tail_pointer(ch->trans_skb);
ch->trans_skb->len = 0;
/* result of the previous 3 statements is NOT always
* already set after ctcm_checkalloc_buffer
* because of possible reuse of the trans_skb
*/
memset(ch->trans_skb->data, 0, 16);
ch->rcvd_xid_th = (struct th_header *)ch->trans_skb_data;
/* check is main purpose here: */
skb_put(ch->trans_skb, TH_HEADER_LENGTH);
ch->rcvd_xid = (struct xid2 *)skb_tail_pointer(ch->trans_skb);
/* check is main purpose here: */
skb_put(ch->trans_skb, XID2_LENGTH);
ch->rcvd_xid_id = skb_tail_pointer(ch->trans_skb);
/* cleanup back to startpoint */
ch->trans_skb->data = ch->trans_skb_data;
skb_reset_tail_pointer(ch->trans_skb);
ch->trans_skb->len = 0;
/* non-checking rewrite of above skb data-buffer referencing: */
/*
memset(ch->trans_skb->data, 0, 16);
ch->rcvd_xid_th = (struct th_header *)ch->trans_skb_data;
ch->rcvd_xid = (struct xid2 *)(ch->trans_skb_data + TH_HEADER_LENGTH);
ch->rcvd_xid_id = ch->trans_skb_data + TH_HEADER_LENGTH + XID2_LENGTH;
*/
ch->ccw[8].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[8].count = 0;
ch->ccw[8].cda = 0x00;
if (!(ch->xid_th && ch->xid && ch->xid_id))
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
"%s(%s): xid_th=%p, xid=%p, xid_id=%p",
CTCM_FUNTAIL, ch->id, ch->xid_th, ch->xid, ch->xid_id);
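/*
 * Build the XID exchange as a single CCW chain: ccw[9..13] move the
 * TH header, the XID2 and the 4-byte id, with read/write command
 * codes mirrored between the two sides; ccw[14] is a terminating
 * NOOP.
 */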
if (side == XSIDE) {
/* mpc_action_xside_xid */
if (ch->xid_th == NULL)
goto done;
ch->ccw[9].cmd_code = CCW_CMD_WRITE;
ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[9].count = TH_HEADER_LENGTH;
ch->ccw[9].cda = virt_to_phys(ch->xid_th);
if (ch->xid == NULL)
goto done;
ch->ccw[10].cmd_code = CCW_CMD_WRITE;
ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[10].count = XID2_LENGTH;
ch->ccw[10].cda = virt_to_phys(ch->xid);
ch->ccw[11].cmd_code = CCW_CMD_READ;
ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[11].count = TH_HEADER_LENGTH;
ch->ccw[11].cda = virt_to_phys(ch->rcvd_xid_th);
ch->ccw[12].cmd_code = CCW_CMD_READ;
ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[12].count = XID2_LENGTH;
ch->ccw[12].cda = virt_to_phys(ch->rcvd_xid);
ch->ccw[13].cmd_code = CCW_CMD_READ;
ch->ccw[13].cda = virt_to_phys(ch->rcvd_xid_id);
} else { /* side == YSIDE : mpc_action_yside_xid */
ch->ccw[9].cmd_code = CCW_CMD_READ;
ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[9].count = TH_HEADER_LENGTH;
ch->ccw[9].cda = virt_to_phys(ch->rcvd_xid_th);
ch->ccw[10].cmd_code = CCW_CMD_READ;
ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[10].count = XID2_LENGTH;
ch->ccw[10].cda = virt_to_phys(ch->rcvd_xid);
if (ch->xid_th == NULL)
goto done;
ch->ccw[11].cmd_code = CCW_CMD_WRITE;
ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[11].count = TH_HEADER_LENGTH;
ch->ccw[11].cda = virt_to_phys(ch->xid_th);
if (ch->xid == NULL)
goto done;
ch->ccw[12].cmd_code = CCW_CMD_WRITE;
ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[12].count = XID2_LENGTH;
ch->ccw[12].cda = virt_to_phys(ch->xid);
if (ch->xid_id == NULL)
goto done;
ch->ccw[13].cmd_code = CCW_CMD_WRITE;
ch->ccw[13].cda = virt_to_phys(ch->xid_id);
}
ch->ccw[13].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[13].count = 4;
ch->ccw[14].cmd_code = CCW_CMD_NOOP;
ch->ccw[14].flags = CCW_FLAG_SLI;
ch->ccw[14].count = 0;
ch->ccw[14].cda = 0;
CTCM_CCW_DUMP((char *)&ch->ccw[8], sizeof(struct ccw1) * 7);
CTCM_D3_DUMP((char *)ch->xid_th, TH_HEADER_LENGTH);
CTCM_D3_DUMP((char *)ch->xid, XID2_LENGTH);
CTCM_D3_DUMP((char *)ch->xid_id, 4);
if (!in_irq()) {
/* Such conditional locking is a known problem for
* sparse because it is statically nondeterministic.
* Warnings should be ignored here. */
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
gotlock = 1;
}
fsm_addtimer(&ch->timer, 5000 , CTC_EVENT_TIMER, ch);
rc = ccw_device_start(ch->cdev, &ch->ccw[8],
(unsigned long)ch, 0xff, 0);
if (gotlock) /* see remark above about conditional locking */
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
if (rc != 0) {
ctcm_ccw_check_rc(ch, rc,
(side == XSIDE) ? "x-side XID" : "y-side XID");
}
done:
CTCM_PR_DEBUG("Exit %s: ch=0x%p id=%s\n",
__func__, ch, ch->id);
return;
}
/*
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg)
{
mpc_action_side_xid(fsm, arg, XSIDE);
}
/*
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg)
{
mpc_action_side_xid(fsm, arg, YSIDE);
}
/*
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
__func__, smp_processor_id(), ch, ch->id);
if (ch->xid == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): ch->xid == NULL",
CTCM_FUNTAIL, dev->name);
return;
}
fsm_newstate(ch->fsm, CH_XID0_INPROGRESS);
ch->xid->xid2_option = XID2_0;
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_XID2INITW:
case MPCG_STATE_XID2INITX:
ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
break;
case MPCG_STATE_XID0IOWAIT:
case MPCG_STATE_XID0IOWAIX:
ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
break;
}
fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch);
return;
}
/*
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg)
{
struct net_device *dev = arg;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = NULL;
int direction;
int send = 0;
if (priv)
grp = priv->mpcg;
if (grp == NULL)
return;
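/*
 * Drive XID7 for both channel directions. Phase 1 runs while
 * phase-2 exchanges are still outstanding; in each phase the two
 * sides use mirrored CCW commands (sense vs. write control) and
 * TH flavours (thdummy vs. thnorm).
 */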
for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
struct channel *ch = priv->channel[direction];
struct xid2 *thisxid = ch->xid;
ch->xid_skb->data = ch->xid_skb_data;
skb_reset_tail_pointer(ch->xid_skb);
ch->xid_skb->len = 0;
thisxid->xid2_option = XID2_7;
send = 0;
/* xid7 phase 1 */
if (grp->outstanding_xid7_p2 > 0) {
if (grp->roll == YSIDE) {
if (fsm_getstate(ch->fsm) == CH_XID7_PENDING1) {
fsm_newstate(ch->fsm, CH_XID7_PENDING2);
ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
memcpy(skb_put(ch->xid_skb,
TH_HEADER_LENGTH),
&thdummy, TH_HEADER_LENGTH);
send = 1;
}
} else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING2) {
fsm_newstate(ch->fsm, CH_XID7_PENDING2);
ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
memcpy(skb_put(ch->xid_skb,
TH_HEADER_LENGTH),
&thnorm, TH_HEADER_LENGTH);
send = 1;
}
} else {
/* xid7 phase 2 */
if (grp->roll == YSIDE) {
if (fsm_getstate(ch->fsm) < CH_XID7_PENDING4) {
fsm_newstate(ch->fsm, CH_XID7_PENDING4);
memcpy(skb_put(ch->xid_skb,
TH_HEADER_LENGTH),
&thnorm, TH_HEADER_LENGTH);
ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
send = 1;
}
} else if (fsm_getstate(ch->fsm) == CH_XID7_PENDING3) {
fsm_newstate(ch->fsm, CH_XID7_PENDING4);
ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
memcpy(skb_put(ch->xid_skb, TH_HEADER_LENGTH),
&thdummy, TH_HEADER_LENGTH);
send = 1;
}
}
if (send)
fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch);
}
return;
}
/*
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg)
{
struct mpcg_info *mpcginfo = arg;
struct channel *ch = mpcginfo->ch;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("%s: ch-id:%s xid2:%i xid7:%i xidt_p2:%i \n",
__func__, ch->id, grp->outstanding_xid2,
grp->outstanding_xid7, grp->outstanding_xid7_p2);
if (fsm_getstate(ch->fsm) < CH_XID7_PENDING)
fsm_newstate(ch->fsm, CH_XID7_PENDING);
grp->outstanding_xid2--;
grp->outstanding_xid7++;
grp->outstanding_xid7_p2++;
/* must change state before validating xid to */
/* properly handle interim interrupts received*/
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_XID2INITW:
fsm_newstate(grp->fsm, MPCG_STATE_XID2INITX);
mpc_validate_xid(mpcginfo);
break;
case MPCG_STATE_XID0IOWAIT:
fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIX);
mpc_validate_xid(mpcginfo);
break;
case MPCG_STATE_XID2INITX:
if (grp->outstanding_xid2 == 0) {
fsm_newstate(grp->fsm, MPCG_STATE_XID7INITW);
mpc_validate_xid(mpcginfo);
fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev);
}
break;
case MPCG_STATE_XID0IOWAIX:
if (grp->outstanding_xid2 == 0) {
fsm_newstate(grp->fsm, MPCG_STATE_XID7INITI);
mpc_validate_xid(mpcginfo);
fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev);
}
break;
}
kfree(mpcginfo);
CTCM_PR_DEBUG("ctcmpc:%s() %s xid2:%i xid7:%i xidt_p2:%i \n",
__func__, ch->id, grp->outstanding_xid2,
grp->outstanding_xid7, grp->outstanding_xid7_p2);
CTCM_PR_DEBUG("ctcmpc:%s() %s grpstate: %s chanstate: %s \n",
__func__, ch->id,
fsm_getstate_str(grp->fsm), fsm_getstate_str(ch->fsm));
return;
}
/*
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg)
{
struct mpcg_info *mpcginfo = arg;
struct channel *ch = mpcginfo->ch;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
__func__, smp_processor_id(), ch, ch->id);
CTCM_PR_DEBUG("%s: outstanding_xid7: %i, outstanding_xid7_p2: %i\n",
__func__, grp->outstanding_xid7, grp->outstanding_xid7_p2);
grp->outstanding_xid7--;
ch->xid_skb->data = ch->xid_skb_data;
skb_reset_tail_pointer(ch->xid_skb);
ch->xid_skb->len = 0;
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_XID7INITI:
fsm_newstate(grp->fsm, MPCG_STATE_XID7INITZ);
mpc_validate_xid(mpcginfo);
break;
case MPCG_STATE_XID7INITW:
fsm_newstate(grp->fsm, MPCG_STATE_XID7INITX);
mpc_validate_xid(mpcginfo);
break;
case MPCG_STATE_XID7INITZ:
case MPCG_STATE_XID7INITX:
if (grp->outstanding_xid7 == 0) {
if (grp->outstanding_xid7_p2 > 0) {
grp->outstanding_xid7 =
grp->outstanding_xid7_p2;
grp->outstanding_xid7_p2 = 0;
} else
fsm_newstate(grp->fsm, MPCG_STATE_XID7INITF);
mpc_validate_xid(mpcginfo);
fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
break;
}
mpc_validate_xid(mpcginfo);
break;
}
kfree(mpcginfo);
return;
}
/*
* mpc_action helper of an MPC Group Station FSM action
* CTCM_PROTO_MPC only
*/
static int mpc_send_qllc_discontact(struct net_device *dev)
{
__u32 new_len = 0;
struct sk_buff *skb;
struct qllc *qllcptr;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("%s: GROUP STATE: %s\n",
__func__, mpcg_state_names[grp->saved_state]);
switch (grp->saved_state) {
/*
* establish conn callback function is
* preferred method to report failure
*/
case MPCG_STATE_XID0IOWAIT:
case MPCG_STATE_XID0IOWAIX:
case MPCG_STATE_XID7INITI:
case MPCG_STATE_XID7INITZ:
case MPCG_STATE_XID2INITW:
case MPCG_STATE_XID2INITX:
case MPCG_STATE_XID7INITW:
case MPCG_STATE_XID7INITX:
if (grp->estconnfunc) {
grp->estconnfunc(grp->port_num, -1, 0);
grp->estconnfunc = NULL;
break;
}
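/* fall through - no callback registered, deliver a QLLC
 * discontact frame upstream instead */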
case MPCG_STATE_FLOWC:
case MPCG_STATE_READY:
grp->send_qllc_disc = 2;
new_len = sizeof(struct qllc);
qllcptr = kzalloc(new_len, gfp_type() | GFP_DMA);
if (qllcptr == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): qllcptr allocation error",
CTCM_FUNTAIL, dev->name);
return -ENOMEM;
}
qllcptr->qllc_address = 0xcc;
qllcptr->qllc_commands = 0x03;
skb = __dev_alloc_skb(new_len, GFP_ATOMIC);
if (skb == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): skb allocation error",
CTCM_FUNTAIL, dev->name);
priv->stats.rx_dropped++;
kfree(qllcptr);
return -ENOMEM;
}
memcpy(skb_put(skb, new_len), qllcptr, new_len);
kfree(qllcptr);
if (skb_headroom(skb) < 4) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): skb_headroom error",
CTCM_FUNTAIL, dev->name);
dev_kfree_skb_any(skb);
return -ENOMEM;
}
*((__u32 *)skb_push(skb, 4)) =
priv->channel[CTCM_READ]->pdu_seq;
priv->channel[CTCM_READ]->pdu_seq++;
CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n",
__func__, priv->channel[CTCM_READ]->pdu_seq);
/* receipt of CC03 resets anticipated sequence number on
receiving side */
priv->channel[CTCM_READ]->pdu_seq = 0x00;
skb_reset_mac_header(skb);
skb->dev = dev;
skb->protocol = htons(ETH_P_SNAP);
skb->ip_summed = CHECKSUM_UNNECESSARY;
CTCM_D3_DUMP(skb->data, (sizeof(struct qllc) + 4));
netif_rx(skb);
break;
default:
break;
}
return 0;
}
/* --- This is the END my friend --- */
| gpl-2.0 |
goodhanrry/N915S_goodHanrry_kernel_lollipop | drivers/scsi/libsas/sas_port.c | 4483 | 9500 | /*
* Serial Attached SCSI (SAS) Port class
*
* Copyright (C) 2005 Adaptec, Inc. All rights reserved.
* Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
*
* This file is licensed under GPLv2.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include "sas_internal.h"
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include "../scsi_sas_internal.h"
static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy *phy)
{
struct sas_ha_struct *sas_ha = phy->ha;
if (memcmp(port->attached_sas_addr, phy->attached_sas_addr,
SAS_ADDR_SIZE) != 0 || (sas_ha->strict_wide_ports &&
memcmp(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE) != 0))
return false;
return true;
}
static void sas_resume_port(struct asd_sas_phy *phy)
{
struct domain_device *dev;
struct asd_sas_port *port = phy->port;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt);
if (si->dft->lldd_port_formed)
si->dft->lldd_port_formed(phy);
if (port->suspended)
port->suspended = 0;
else {
/* we only need to handle "link returned" actions once */
return;
}
/* if the port came back:
* 1/ presume every device came back
* 2/ force the next revalidation to check all expander phys
*/
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
int i, rc;
rc = sas_notify_lldd_dev_found(dev);
if (rc) {
sas_unregister_dev(port, dev);
continue;
}
if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
dev->ex_dev.ex_change_count = -1;
for (i = 0; i < dev->ex_dev.num_phys; i++) {
struct ex_phy *phy = &dev->ex_dev.ex_phy[i];
phy->phy_change_count = -1;
}
}
}
sas_discover_event(port, DISCE_RESUME);
}
/**
* sas_form_port -- add this phy to a port
* @phy: the phy of interest
*
* This function adds this phy to an existing port, thus creating a wide
* port, or it creates a port and adds the phy to the port.
*/
static void sas_form_port(struct asd_sas_phy *phy)
{
int i;
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
struct sas_internal *si =
to_sas_internal(sas_ha->core.shost->transportt);
unsigned long flags;
if (port) {
if (!phy_is_wideport_member(port, phy))
sas_deform_port(phy, 0);
else if (phy->suspended) {
phy->suspended = 0;
sas_resume_port(phy);
/* phy came back, try to cancel the timeout */
wake_up(&sas_ha->eh_wait_q);
return;
} else {
SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
__func__, phy->id, phy->port->id,
phy->port->num_phys);
return;
}
}
/* see if the phy should be part of a wide port */
spin_lock_irqsave(&sas_ha->phy_port_lock, flags);
for (i = 0; i < sas_ha->num_phys; i++) {
port = sas_ha->sas_port[i];
spin_lock(&port->phy_list_lock);
if (*(u64 *) port->sas_addr &&
phy_is_wideport_member(port, phy) && port->num_phys > 0) {
/* wide port */
SAS_DPRINTK("phy%d matched wide port%d\n", phy->id,
port->id);
break;
}
spin_unlock(&port->phy_list_lock);
}
/* The phy does not match any existing port, create a new one */
if (i == sas_ha->num_phys) {
for (i = 0; i < sas_ha->num_phys; i++) {
port = sas_ha->sas_port[i];
spin_lock(&port->phy_list_lock);
if (*(u64 *)port->sas_addr == 0
&& port->num_phys == 0) {
memcpy(port->sas_addr, phy->sas_addr,
SAS_ADDR_SIZE);
break;
}
spin_unlock(&port->phy_list_lock);
}
}
if (i >= sas_ha->num_phys) {
printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n",
__func__);
spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
return;
}
/* add the phy to the port */
list_add_tail(&phy->port_phy_el, &port->phy_list);
sas_phy_set_target(phy, port->port_dev);
phy->port = port;
port->num_phys++;
port->phy_mask |= (1U << phy->id);
if (*(u64 *)port->attached_sas_addr == 0) {
port->class = phy->class;
memcpy(port->attached_sas_addr, phy->attached_sas_addr,
SAS_ADDR_SIZE);
port->iproto = phy->iproto;
port->tproto = phy->tproto;
port->oob_mode = phy->oob_mode;
port->linkrate = phy->linkrate;
} else
port->linkrate = max(port->linkrate, phy->linkrate);
spin_unlock(&port->phy_list_lock);
spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
if (!port->port) {
port->port = sas_port_alloc(phy->phy->dev.parent, port->id);
BUG_ON(!port->port);
sas_port_add(port->port);
}
sas_port_add_phy(port->port, phy->phy);
SAS_DPRINTK("%s added to %s, phy_mask:0x%x (%16llx)\n",
dev_name(&phy->phy->dev), dev_name(&port->port->dev),
port->phy_mask,
SAS_ADDR(port->attached_sas_addr));
if (port->port_dev)
port->port_dev->pathways = port->num_phys;
/* Tell the LLDD about this port formation. */
if (si->dft->lldd_port_formed)
si->dft->lldd_port_formed(phy);
sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN);
}
/**
* sas_deform_port -- remove this phy from the port it belongs to
* @phy: the phy of interest
*
* This is called when the physical link to the other phy has been
* lost (on this phy), in Event thread context. We cannot delay here.
*/
void sas_deform_port(struct asd_sas_phy *phy, int gone)
{
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
struct sas_internal *si =
to_sas_internal(sas_ha->core.shost->transportt);
struct domain_device *dev;
unsigned long flags;
if (!port)
return; /* done by a phy event */
dev = port->port_dev;
if (dev)
dev->pathways--;
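/*
 * Last phy in the port: tear the whole port down, including its
 * domain devices. Otherwise just detach this one phy from the
 * transport-layer port.
 */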
if (port->num_phys == 1) {
sas_unregister_domain_devices(port, gone);
sas_port_delete(port->port);
port->port = NULL;
} else {
sas_port_delete_phy(port->port, phy->phy);
sas_device_set_phy(dev, port->port);
}
if (si->dft->lldd_port_deformed)
si->dft->lldd_port_deformed(phy);
spin_lock_irqsave(&sas_ha->phy_port_lock, flags);
spin_lock(&port->phy_list_lock);
list_del_init(&phy->port_phy_el);
sas_phy_set_target(phy, NULL);
phy->port = NULL;
port->num_phys--;
port->phy_mask &= ~(1U << phy->id);
if (port->num_phys == 0) {
INIT_LIST_HEAD(&port->phy_list);
memset(port->sas_addr, 0, SAS_ADDR_SIZE);
memset(port->attached_sas_addr, 0, SAS_ADDR_SIZE);
port->class = 0;
port->iproto = 0;
port->tproto = 0;
port->oob_mode = 0;
port->phy_mask = 0;
}
spin_unlock(&port->phy_list_lock);
spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
return;
}
/* ---------- SAS port events ---------- */
void sas_porte_bytes_dmaed(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending);
sas_form_port(phy);
}
void sas_porte_broadcast_rcvd(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
unsigned long flags;
u32 prim;
clear_bit(PORTE_BROADCAST_RCVD, &phy->port_events_pending);
spin_lock_irqsave(&phy->sas_prim_lock, flags);
prim = phy->sas_prim;
spin_unlock_irqrestore(&phy->sas_prim_lock, flags);
SAS_DPRINTK("broadcast received: %d\n", prim);
sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
}
void sas_porte_link_reset_err(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending);
sas_deform_port(phy, 1);
}
void sas_porte_timer_event(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending);
sas_deform_port(phy, 1);
}
void sas_porte_hard_reset(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
clear_bit(PORTE_HARD_RESET, &phy->port_events_pending);
sas_deform_port(phy, 1);
}
/* ---------- SAS port registration ---------- */
static void sas_init_port(struct asd_sas_port *port,
struct sas_ha_struct *sas_ha, int i)
{
memset(port, 0, sizeof(*port));
port->id = i;
INIT_LIST_HEAD(&port->dev_list);
INIT_LIST_HEAD(&port->disco_list);
INIT_LIST_HEAD(&port->destroy_list);
spin_lock_init(&port->phy_list_lock);
INIT_LIST_HEAD(&port->phy_list);
port->ha = sas_ha;
spin_lock_init(&port->dev_list_lock);
}
int sas_register_ports(struct sas_ha_struct *sas_ha)
{
int i;
/* initialize the ports and discovery */
for (i = 0; i < sas_ha->num_phys; i++) {
struct asd_sas_port *port = sas_ha->sas_port[i];
sas_init_port(port, sas_ha, i);
sas_init_disc(&port->disc, port);
}
return 0;
}
void sas_unregister_ports(struct sas_ha_struct *sas_ha)
{
int i;
for (i = 0; i < sas_ha->num_phys; i++)
if (sas_ha->sas_phy[i]->port)
sas_deform_port(sas_ha->sas_phy[i], 0);
}
| gpl-2.0 |
longqzh/chronnOS | drivers/video/gxt4500.c | 4739 | 21460 | /*
* Frame buffer device for IBM GXT4500P and GXT6000P display adaptors
*
* Copyright (C) 2006 Paul Mackerras, IBM Corp. <paulus@samba.org>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fb.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/delay.h>
#include <linux/string.h>
#define PCI_DEVICE_ID_IBM_GXT4500P 0x21c
#define PCI_DEVICE_ID_IBM_GXT6000P 0x170
/* GXT4500P registers */
/* Registers in PCI config space */
#define CFG_ENDIAN0 0x40
/* Misc control/status registers */
#define STATUS 0x1000
#define CTRL_REG0 0x1004
#define CR0_HALT_DMA 0x4
#define CR0_RASTER_RESET 0x8
#define CR0_GEOM_RESET 0x10
#define CR0_MEM_CTRLER_RESET 0x20
/* Framebuffer control registers */
#define FB_AB_CTRL 0x1100
#define FB_CD_CTRL 0x1104
#define FB_WID_CTRL 0x1108
#define FB_Z_CTRL 0x110c
#define FB_VGA_CTRL 0x1110
#define REFRESH_AB_CTRL 0x1114
#define REFRESH_CD_CTRL 0x1118
#define FB_OVL_CTRL 0x111c
#define FB_CTRL_TYPE 0x80000000
#define FB_CTRL_WIDTH_MASK 0x007f0000
#define FB_CTRL_WIDTH_SHIFT 16
#define FB_CTRL_START_SEG_MASK 0x00003fff
#define REFRESH_START 0x1098
#define REFRESH_SIZE 0x109c
/* "Direct" framebuffer access registers */
#define DFA_FB_A 0x11e0
#define DFA_FB_B 0x11e4
#define DFA_FB_C 0x11e8
#define DFA_FB_D 0x11ec
#define DFA_FB_ENABLE 0x80000000
#define DFA_FB_BASE_MASK 0x03f00000
#define DFA_FB_STRIDE_1k 0x00000000
#define DFA_FB_STRIDE_2k 0x00000010
#define DFA_FB_STRIDE_4k 0x00000020
#define DFA_PIX_8BIT 0x00000000
#define DFA_PIX_16BIT_565 0x00000001
#define DFA_PIX_16BIT_1555 0x00000002
#define DFA_PIX_24BIT 0x00000004
#define DFA_PIX_32BIT 0x00000005
/* maps DFA_PIX_* to pixel size in bytes */
static const unsigned char pixsize[] = {
1, 2, 2, 2, 4, 4
};
/* Display timing generator registers */
#define DTG_CONTROL 0x1900
#define DTG_CTL_SCREEN_REFRESH 2
#define DTG_CTL_ENABLE 1
#define DTG_HORIZ_EXTENT 0x1904
#define DTG_HORIZ_DISPLAY 0x1908
#define DTG_HSYNC_START 0x190c
#define DTG_HSYNC_END 0x1910
#define DTG_HSYNC_END_COMP 0x1914
#define DTG_VERT_EXTENT 0x1918
#define DTG_VERT_DISPLAY 0x191c
#define DTG_VSYNC_START 0x1920
#define DTG_VSYNC_END 0x1924
#define DTG_VERT_SHORT 0x1928
/* PLL/RAMDAC registers */
#define DISP_CTL 0x402c
#define DISP_CTL_OFF 2
#define SYNC_CTL 0x4034
#define SYNC_CTL_SYNC_ON_RGB 1
#define SYNC_CTL_SYNC_OFF 2
#define SYNC_CTL_HSYNC_INV 8
#define SYNC_CTL_VSYNC_INV 0x10
#define SYNC_CTL_HSYNC_OFF 0x20
#define SYNC_CTL_VSYNC_OFF 0x40
#define PLL_M 0x4040
#define PLL_N 0x4044
#define PLL_POSTDIV 0x4048
#define PLL_C 0x404c
/* Hardware cursor */
#define CURSOR_X 0x4078
#define CURSOR_Y 0x407c
#define CURSOR_HOTSPOT 0x4080
#define CURSOR_MODE 0x4084
#define CURSOR_MODE_OFF 0
#define CURSOR_MODE_4BPP 1
#define CURSOR_PIXMAP 0x5000
#define CURSOR_CMAP 0x7400
/* Window attribute table */
#define WAT_FMT 0x4100
#define WAT_FMT_24BIT 0
#define WAT_FMT_16BIT_565 1
#define WAT_FMT_16BIT_1555 2
#define WAT_FMT_32BIT 3 /* 0 vs. 3 is a guess */
#define WAT_FMT_8BIT_332 9
#define WAT_FMT_8BIT 0xa
#define WAT_FMT_NO_CMAP 4 /* ORd in to other values */
#define WAT_CMAP_OFFSET 0x4104 /* 4-bit value gets << 6 */
#define WAT_CTRL 0x4108
#define WAT_CTRL_SEL_B 1 /* select B buffer if 1 */
#define WAT_CTRL_NO_INC 2
#define WAT_GAMMA_CTRL 0x410c
#define WAT_GAMMA_DISABLE 1 /* disables gamma cmap */
#define WAT_OVL_CTRL 0x430c /* controls overlay */
/* Indexed by DFA_PIX_* values */
static const unsigned char watfmt[] = {
WAT_FMT_8BIT, WAT_FMT_16BIT_565, WAT_FMT_16BIT_1555, 0,
WAT_FMT_24BIT, WAT_FMT_32BIT
};
/* Colormap array; 1k entries of 4 bytes each */
#define CMAP 0x6000
#define readreg(par, reg) readl((par)->regs + (reg))
#define writereg(par, reg, val) writel((val), (par)->regs + (reg))
struct gxt4500_par {
void __iomem *regs;
int pixfmt; /* pixel format, see DFA_PIX_* values */
/* PLL parameters */
int refclk_ps; /* ref clock period in picoseconds */
int pll_m; /* ref clock divisor */
int pll_n; /* VCO divisor */
int pll_pd1; /* first post-divisor */
int pll_pd2; /* second post-divisor */
u32 pseudo_palette[16]; /* used in color blits */
};
/* mode requested by user */
static char *mode_option;
/* default mode: 1280x1024 @ 60 Hz, 8 bpp */
static const struct fb_videomode defaultmode __devinitdata = {
.refresh = 60,
.xres = 1280,
.yres = 1024,
.pixclock = 9295,
.left_margin = 248,
.right_margin = 48,
.upper_margin = 38,
.lower_margin = 1,
.hsync_len = 112,
.vsync_len = 3,
.vmode = FB_VMODE_NONINTERLACED
};
/* List of supported cards */
enum gxt_cards {
GXT4500P,
GXT6000P
};
/* Card-specific information */
static const struct cardinfo {
int refclk_ps; /* period of PLL reference clock in ps */
const char *cardname;
} cardinfo[] = {
[GXT4500P] = { .refclk_ps = 9259, .cardname = "IBM GXT4500P" },
[GXT6000P] = { .refclk_ps = 40000, .cardname = "IBM GXT6000P" },
};
/*
* The refclk and VCO dividers appear to use a linear feedback shift
* register, which gets reloaded when it reaches a terminal value, at
* which point the divider output is toggled. Thus one can obtain
* whatever divisor is required by putting the appropriate value into
* the reload register. For a divisor of N, one puts the value from
* the LFSR sequence that comes N-1 places before the terminal value
* into the reload register.
*/
static const unsigned char mdivtab[] = {
/* 1 */ 0x3f, 0x00, 0x20, 0x10, 0x28, 0x14, 0x2a, 0x15, 0x0a,
/* 10 */ 0x25, 0x32, 0x19, 0x0c, 0x26, 0x13, 0x09, 0x04, 0x22, 0x11,
/* 20 */ 0x08, 0x24, 0x12, 0x29, 0x34, 0x1a, 0x2d, 0x36, 0x1b, 0x0d,
/* 30 */ 0x06, 0x23, 0x31, 0x38, 0x1c, 0x2e, 0x17, 0x0b, 0x05, 0x02,
/* 40 */ 0x21, 0x30, 0x18, 0x2c, 0x16, 0x2b, 0x35, 0x3a, 0x1d, 0x0e,
/* 50 */ 0x27, 0x33, 0x39, 0x3c, 0x1e, 0x2f, 0x37, 0x3b, 0x3d, 0x3e,
/* 60 */ 0x1f, 0x0f, 0x07, 0x03, 0x01,
};
static const unsigned char ndivtab[] = {
/* 2 */ 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0x78, 0xbc, 0x5e,
/* 10 */ 0x2f, 0x17, 0x0b, 0x85, 0xc2, 0xe1, 0x70, 0x38, 0x9c, 0x4e,
/* 20 */ 0xa7, 0xd3, 0xe9, 0xf4, 0xfa, 0xfd, 0xfe, 0x7f, 0xbf, 0xdf,
/* 30 */ 0xef, 0x77, 0x3b, 0x1d, 0x8e, 0xc7, 0xe3, 0x71, 0xb8, 0xdc,
/* 40 */ 0x6e, 0xb7, 0x5b, 0x2d, 0x16, 0x8b, 0xc5, 0xe2, 0xf1, 0xf8,
/* 50 */ 0xfc, 0x7e, 0x3f, 0x9f, 0xcf, 0x67, 0xb3, 0xd9, 0x6c, 0xb6,
/* 60 */ 0xdb, 0x6d, 0x36, 0x9b, 0x4d, 0x26, 0x13, 0x89, 0xc4, 0x62,
/* 70 */ 0xb1, 0xd8, 0xec, 0xf6, 0xfb, 0x7d, 0xbe, 0x5f, 0xaf, 0x57,
/* 80 */ 0x2b, 0x95, 0x4a, 0x25, 0x92, 0x49, 0xa4, 0x52, 0x29, 0x94,
/* 90 */ 0xca, 0x65, 0xb2, 0x59, 0x2c, 0x96, 0xcb, 0xe5, 0xf2, 0x79,
/* 100 */ 0x3c, 0x1e, 0x0f, 0x07, 0x83, 0x41, 0x20, 0x90, 0x48, 0x24,
/* 110 */ 0x12, 0x09, 0x84, 0x42, 0xa1, 0x50, 0x28, 0x14, 0x8a, 0x45,
/* 120 */ 0xa2, 0xd1, 0xe8, 0x74, 0xba, 0xdd, 0xee, 0xf7, 0x7b, 0x3d,
/* 130 */ 0x9e, 0x4f, 0x27, 0x93, 0xc9, 0xe4, 0x72, 0x39, 0x1c, 0x0e,
/* 140 */ 0x87, 0xc3, 0x61, 0x30, 0x18, 0x8c, 0xc6, 0x63, 0x31, 0x98,
/* 150 */ 0xcc, 0xe6, 0x73, 0xb9, 0x5c, 0x2e, 0x97, 0x4b, 0xa5, 0xd2,
/* 160 */ 0x69,
};
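/*
 * The tables are indexed by the desired divisor: gxt4500_set_par()
 * below writes mdivtab[m - 1] to PLL_M and ndivtab[n - 2] to PLL_N.
 */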
static int calc_pll(int period_ps, struct gxt4500_par *par)
{
int m, n, pdiv1, pdiv2, postdiv;
int pll_period, best_error, t, intf;
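/* The resulting dot-clock period is refclk_ps * m * pd1 * pd2 / n
 * (see calc_pixclock()); search the divider space for the closest
 * period that is not shorter than requested. */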
/* only deal with range 5MHz - 300MHz */
if (period_ps < 3333 || period_ps > 200000)
return -1;
best_error = 1000000;
for (pdiv1 = 1; pdiv1 <= 8; ++pdiv1) {
for (pdiv2 = 1; pdiv2 <= pdiv1; ++pdiv2) {
postdiv = pdiv1 * pdiv2;
pll_period = DIV_ROUND_UP(period_ps, postdiv);
/* keep pll in range 350..600 MHz */
if (pll_period < 1666 || pll_period > 2857)
continue;
for (m = 1; m <= 64; ++m) {
intf = m * par->refclk_ps;
if (intf > 500000)
break;
n = intf * postdiv / period_ps;
if (n < 3 || n > 160)
continue;
t = par->refclk_ps * m * postdiv / n;
t -= period_ps;
if (t >= 0 && t < best_error) {
par->pll_m = m;
par->pll_n = n;
par->pll_pd1 = pdiv1;
par->pll_pd2 = pdiv2;
best_error = t;
}
}
}
}
if (best_error == 1000000)
return -1;
return 0;
}
static int calc_pixclock(struct gxt4500_par *par)
{
return par->refclk_ps * par->pll_m * par->pll_pd1 * par->pll_pd2
/ par->pll_n;
}
static int gxt4500_var_to_par(struct fb_var_screeninfo *var,
struct gxt4500_par *par)
{
if (var->xres + var->xoffset > var->xres_virtual ||
var->yres + var->yoffset > var->yres_virtual ||
var->xres_virtual > 4096)
return -EINVAL;
if ((var->vmode & FB_VMODE_MASK) != FB_VMODE_NONINTERLACED)
return -EINVAL;
if (calc_pll(var->pixclock, par) < 0)
return -EINVAL;
switch (var->bits_per_pixel) {
case 32:
if (var->transp.length)
par->pixfmt = DFA_PIX_32BIT;
else
par->pixfmt = DFA_PIX_24BIT;
break;
case 24:
par->pixfmt = DFA_PIX_24BIT;
break;
case 16:
if (var->green.length == 5)
par->pixfmt = DFA_PIX_16BIT_1555;
else
par->pixfmt = DFA_PIX_16BIT_565;
break;
case 8:
par->pixfmt = DFA_PIX_8BIT;
break;
default:
return -EINVAL;
}
return 0;
}
static const struct fb_bitfield eightbits = {0, 8};
static const struct fb_bitfield nobits = {0, 0};
static void gxt4500_unpack_pixfmt(struct fb_var_screeninfo *var,
int pixfmt)
{
var->bits_per_pixel = pixsize[pixfmt] * 8;
var->red = eightbits;
var->green = eightbits;
var->blue = eightbits;
var->transp = nobits;
switch (pixfmt) {
case DFA_PIX_16BIT_565:
var->red.length = 5;
var->green.length = 6;
var->blue.length = 5;
break;
case DFA_PIX_16BIT_1555:
var->red.length = 5;
var->green.length = 5;
var->blue.length = 5;
var->transp.length = 1;
break;
case DFA_PIX_32BIT:
var->transp.length = 8;
break;
}
if (pixfmt != DFA_PIX_8BIT) {
var->green.offset = var->red.length;
var->blue.offset = var->green.offset + var->green.length;
if (var->transp.length)
var->transp.offset =
var->blue.offset + var->blue.length;
}
}
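/*
 * Illustrative sketch, not part of the driver: for DFA_PIX_16BIT_565
 * the function above yields red at bits 0-4, green at bits 5-10 and
 * blue at bits 11-15, i.e. red in the least significant bits. A pixel
 * could be assembled from 8-bit components like this:
 */
#if 0
static u16 pack_565(u8 r, u8 g, u8 b)
{
return (r >> 3) | ((g >> 2) << 5) | ((b >> 3) << 11);
}
#endif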
static int gxt4500_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct gxt4500_par par;
int err;
par = *(struct gxt4500_par *)info->par;
err = gxt4500_var_to_par(var, &par);
if (!err) {
var->pixclock = calc_pixclock(&par);
gxt4500_unpack_pixfmt(var, par.pixfmt);
}
return err;
}
static int gxt4500_set_par(struct fb_info *info)
{
struct gxt4500_par *par = info->par;
struct fb_var_screeninfo *var = &info->var;
int err;
u32 ctrlreg, tmp;
unsigned int dfa_ctl, pixfmt, stride;
unsigned int wid_tiles, i;
unsigned int prefetch_pix, htot;
struct gxt4500_par save_par;
save_par = *par;
err = gxt4500_var_to_par(var, par);
if (err) {
*par = save_par;
return err;
}
/* turn off DTG for now */
ctrlreg = readreg(par, DTG_CONTROL);
ctrlreg &= ~(DTG_CTL_ENABLE | DTG_CTL_SCREEN_REFRESH);
writereg(par, DTG_CONTROL, ctrlreg);
/* set PLL registers */
tmp = readreg(par, PLL_C) & ~0x7f;
if (par->pll_n < 38)
tmp |= 0x29;
else if (par->pll_n < 69)
tmp |= 0x35;
else if (par->pll_n < 100)
tmp |= 0x76;
else
tmp |= 0x7e;
writereg(par, PLL_C, tmp);
writereg(par, PLL_M, mdivtab[par->pll_m - 1]);
writereg(par, PLL_N, ndivtab[par->pll_n - 2]);
tmp = ((8 - par->pll_pd2) << 3) | (8 - par->pll_pd1);
if (par->pll_pd1 == 8 || par->pll_pd2 == 8) {
/* work around erratum */
writereg(par, PLL_POSTDIV, tmp | 0x9);
udelay(1);
}
writereg(par, PLL_POSTDIV, tmp);
msleep(20);
/* turn off hardware cursor */
writereg(par, CURSOR_MODE, CURSOR_MODE_OFF);
/* reset raster engine */
writereg(par, CTRL_REG0, CR0_RASTER_RESET | (CR0_RASTER_RESET << 16));
udelay(10);
writereg(par, CTRL_REG0, CR0_RASTER_RESET << 16);
/* set display timing generator registers */
htot = var->xres + var->left_margin + var->right_margin +
var->hsync_len;
writereg(par, DTG_HORIZ_EXTENT, htot - 1);
writereg(par, DTG_HORIZ_DISPLAY, var->xres - 1);
writereg(par, DTG_HSYNC_START, var->xres + var->right_margin - 1);
writereg(par, DTG_HSYNC_END,
var->xres + var->right_margin + var->hsync_len - 1);
writereg(par, DTG_HSYNC_END_COMP,
var->xres + var->right_margin + var->hsync_len - 1);
writereg(par, DTG_VERT_EXTENT,
var->yres + var->upper_margin + var->lower_margin +
var->vsync_len - 1);
writereg(par, DTG_VERT_DISPLAY, var->yres - 1);
writereg(par, DTG_VSYNC_START, var->yres + var->lower_margin - 1);
writereg(par, DTG_VSYNC_END,
var->yres + var->lower_margin + var->vsync_len - 1);
prefetch_pix = 3300000 / var->pixclock;
if (prefetch_pix >= htot)
prefetch_pix = htot - 1;
writereg(par, DTG_VERT_SHORT, htot - prefetch_pix - 1);
ctrlreg |= DTG_CTL_ENABLE | DTG_CTL_SCREEN_REFRESH;
writereg(par, DTG_CONTROL, ctrlreg);
/* calculate stride in DFA aperture */
if (var->xres_virtual > 2048) {
stride = 4096;
dfa_ctl = DFA_FB_STRIDE_4k;
} else if (var->xres_virtual > 1024) {
stride = 2048;
dfa_ctl = DFA_FB_STRIDE_2k;
} else {
stride = 1024;
dfa_ctl = DFA_FB_STRIDE_1k;
}
/* Set up framebuffer definition */
wid_tiles = (var->xres_virtual + 63) >> 6;
/* XXX add proper FB allocation here someday */
writereg(par, FB_AB_CTRL, FB_CTRL_TYPE | (wid_tiles << 16) | 0);
writereg(par, REFRESH_AB_CTRL, FB_CTRL_TYPE | (wid_tiles << 16) | 0);
writereg(par, FB_CD_CTRL, FB_CTRL_TYPE | (wid_tiles << 16) | 0);
writereg(par, REFRESH_CD_CTRL, FB_CTRL_TYPE | (wid_tiles << 16) | 0);
writereg(par, REFRESH_START, (var->xoffset << 16) | var->yoffset);
writereg(par, REFRESH_SIZE, (var->xres << 16) | var->yres);
/* Set up framebuffer access by CPU */
pixfmt = par->pixfmt;
dfa_ctl |= DFA_FB_ENABLE | pixfmt;
writereg(par, DFA_FB_A, dfa_ctl);
/*
* Set up window attribute table.
* We set all WAT entries the same so it doesn't matter what the
* window ID (WID) plane contains.
*/
for (i = 0; i < 32; ++i) {
writereg(par, WAT_FMT + (i << 4), watfmt[pixfmt]);
writereg(par, WAT_CMAP_OFFSET + (i << 4), 0);
writereg(par, WAT_CTRL + (i << 4), 0);
writereg(par, WAT_GAMMA_CTRL + (i << 4), WAT_GAMMA_DISABLE);
}
/* Set sync polarity etc. */
ctrlreg = readreg(par, SYNC_CTL) &
~(SYNC_CTL_SYNC_ON_RGB | SYNC_CTL_HSYNC_INV |
SYNC_CTL_VSYNC_INV);
if (var->sync & FB_SYNC_ON_GREEN)
ctrlreg |= SYNC_CTL_SYNC_ON_RGB;
if (!(var->sync & FB_SYNC_HOR_HIGH_ACT))
ctrlreg |= SYNC_CTL_HSYNC_INV;
if (!(var->sync & FB_SYNC_VERT_HIGH_ACT))
ctrlreg |= SYNC_CTL_VSYNC_INV;
writereg(par, SYNC_CTL, ctrlreg);
info->fix.line_length = stride * pixsize[pixfmt];
info->fix.visual = (pixfmt == DFA_PIX_8BIT) ? FB_VISUAL_PSEUDOCOLOR :
FB_VISUAL_DIRECTCOLOR;
return 0;
}
static int gxt4500_setcolreg(unsigned int reg, unsigned int red,
unsigned int green, unsigned int blue,
unsigned int transp, struct fb_info *info)
{
u32 cmap_entry;
struct gxt4500_par *par = info->par;
if (reg > 1023)
return 1;
cmap_entry = ((transp & 0xff00) << 16) | ((red & 0xff00) << 8) |
(green & 0xff00) | (blue >> 8);
writereg(par, CMAP + reg * 4, cmap_entry);
if (reg < 16 && par->pixfmt != DFA_PIX_8BIT) {
u32 *pal = info->pseudo_palette;
u32 val = reg;
switch (par->pixfmt) {
case DFA_PIX_16BIT_565:
val |= (reg << 11) | (reg << 6);
break;
case DFA_PIX_16BIT_1555:
val |= (reg << 10) | (reg << 5);
break;
case DFA_PIX_32BIT:
val |= (reg << 24);
/* fall through */
case DFA_PIX_24BIT:
val |= (reg << 16) | (reg << 8);
break;
}
pal[reg] = val;
}
return 0;
}
static int gxt4500_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct gxt4500_par *par = info->par;
if (var->xoffset & 7)
return -EINVAL;
if (var->xoffset + var->xres > var->xres_virtual ||
var->yoffset + var->yres > var->yres_virtual)
return -EINVAL;
writereg(par, REFRESH_START, (var->xoffset << 16) | var->yoffset);
return 0;
}
static int gxt4500_blank(int blank, struct fb_info *info)
{
struct gxt4500_par *par = info->par;
int ctrl, dctl;
ctrl = readreg(par, SYNC_CTL);
ctrl &= ~(SYNC_CTL_SYNC_OFF | SYNC_CTL_HSYNC_OFF | SYNC_CTL_VSYNC_OFF);
dctl = readreg(par, DISP_CTL);
dctl |= DISP_CTL_OFF;
switch (blank) {
case FB_BLANK_UNBLANK:
dctl &= ~DISP_CTL_OFF;
break;
case FB_BLANK_POWERDOWN:
ctrl |= SYNC_CTL_SYNC_OFF;
break;
case FB_BLANK_HSYNC_SUSPEND:
ctrl |= SYNC_CTL_HSYNC_OFF;
break;
case FB_BLANK_VSYNC_SUSPEND:
ctrl |= SYNC_CTL_VSYNC_OFF;
break;
default: ;
}
writereg(par, SYNC_CTL, ctrl);
writereg(par, DISP_CTL, dctl);
return 0;
}
static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
.id = "IBM GXT4500P",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
.xpanstep = 8,
.ypanstep = 1,
.mmio_len = 0x20000,
};
static struct fb_ops gxt4500_ops = {
.owner = THIS_MODULE,
.fb_check_var = gxt4500_check_var,
.fb_set_par = gxt4500_set_par,
.fb_setcolreg = gxt4500_setcolreg,
.fb_pan_display = gxt4500_pan_display,
.fb_blank = gxt4500_blank,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
/* PCI functions */
static int __devinit gxt4500_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int err;
unsigned long reg_phys, fb_phys;
struct gxt4500_par *par;
struct fb_info *info;
struct fb_var_screeninfo var;
enum gxt_cards cardtype;
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "gxt4500: cannot enable PCI device: %d\n",
err);
return err;
}
reg_phys = pci_resource_start(pdev, 0);
if (!request_mem_region(reg_phys, pci_resource_len(pdev, 0),
"gxt4500 regs")) {
dev_err(&pdev->dev, "gxt4500: cannot get registers\n");
goto err_nodev;
}
fb_phys = pci_resource_start(pdev, 1);
if (!request_mem_region(fb_phys, pci_resource_len(pdev, 1),
"gxt4500 FB")) {
dev_err(&pdev->dev, "gxt4500: cannot get framebuffer\n");
goto err_free_regs;
}
info = framebuffer_alloc(sizeof(struct gxt4500_par), &pdev->dev);
if (!info) {
dev_err(&pdev->dev, "gxt4500: cannot alloc FB info record\n");
goto err_free_fb;
}
par = info->par;
cardtype = ent->driver_data;
par->refclk_ps = cardinfo[cardtype].refclk_ps;
info->fix = gxt4500_fix;
strlcpy(info->fix.id, cardinfo[cardtype].cardname,
sizeof(info->fix.id));
info->pseudo_palette = par->pseudo_palette;
info->fix.mmio_start = reg_phys;
par->regs = pci_ioremap_bar(pdev, 0);
if (!par->regs) {
dev_err(&pdev->dev, "gxt4500: cannot map registers\n");
goto err_free_all;
}
info->fix.smem_start = fb_phys;
info->fix.smem_len = pci_resource_len(pdev, 1);
info->screen_base = pci_ioremap_bar(pdev, 1);
if (!info->screen_base) {
dev_err(&pdev->dev, "gxt4500: cannot map framebuffer\n");
goto err_unmap_regs;
}
pci_set_drvdata(pdev, info);
/* Set byte-swapping for DFA aperture for all pixel sizes */
pci_write_config_dword(pdev, CFG_ENDIAN0, 0x333300);
info->fbops = &gxt4500_ops;
info->flags = FBINFO_FLAG_DEFAULT;
err = fb_alloc_cmap(&info->cmap, 256, 0);
if (err) {
dev_err(&pdev->dev, "gxt4500: cannot allocate cmap\n");
goto err_unmap_all;
}
gxt4500_blank(FB_BLANK_UNBLANK, info);
if (!fb_find_mode(&var, info, mode_option, NULL, 0, &defaultmode, 8)) {
dev_err(&pdev->dev, "gxt4500: cannot find valid video mode\n");
goto err_free_cmap;
}
info->var = var;
if (gxt4500_set_par(info)) {
printk(KERN_ERR "gxt4500: cannot set video mode\n");
goto err_free_cmap;
}
if (register_framebuffer(info) < 0) {
dev_err(&pdev->dev, "gxt4500: cannot register framebuffer\n");
goto err_free_cmap;
}
printk(KERN_INFO "fb%d: %s frame buffer device\n",
info->node, info->fix.id);
return 0;
err_free_cmap:
fb_dealloc_cmap(&info->cmap);
err_unmap_all:
iounmap(info->screen_base);
err_unmap_regs:
iounmap(par->regs);
err_free_all:
framebuffer_release(info);
err_free_fb:
release_mem_region(fb_phys, pci_resource_len(pdev, 1));
err_free_regs:
release_mem_region(reg_phys, pci_resource_len(pdev, 0));
err_nodev:
return -ENODEV;
}
static void __devexit gxt4500_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct gxt4500_par *par;
if (!info)
return;
par = info->par;
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
iounmap(par->regs);
iounmap(info->screen_base);
release_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
release_mem_region(pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1));
framebuffer_release(info);
}
/* supported chipsets */
static const struct pci_device_id gxt4500_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_GXT4500P),
.driver_data = GXT4500P },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_GXT6000P),
.driver_data = GXT6000P },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, gxt4500_pci_tbl);
static struct pci_driver gxt4500_driver = {
.name = "gxt4500",
.id_table = gxt4500_pci_tbl,
.probe = gxt4500_probe,
.remove = __devexit_p(gxt4500_remove),
};
static int __devinit gxt4500_init(void)
{
#ifndef MODULE
if (fb_get_options("gxt4500", &mode_option))
return -ENODEV;
#endif
return pci_register_driver(&gxt4500_driver);
}
module_init(gxt4500_init);
static void __exit gxt4500_exit(void)
{
pci_unregister_driver(&gxt4500_driver);
}
module_exit(gxt4500_exit);
MODULE_AUTHOR("Paul Mackerras <paulus@samba.org>");
MODULE_DESCRIPTION("FBDev driver for IBM GXT4500P/6000P");
MODULE_LICENSE("GPL");
module_param(mode_option, charp, 0);
MODULE_PARM_DESC(mode_option, "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\"");
| gpl-2.0 |
neighborhoodhacker/lge-kernel-gproj | drivers/media/video/saa7134/saa7134-core.c | 4995 | 37105 | /*
*
* device driver for philips saa7134 based TV cards
* driver core
*
* (c) 2001-03 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/sound.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include "saa7134-reg.h"
#include "saa7134.h"
MODULE_DESCRIPTION("v4l2 driver module for saa7130/34 based TV cards");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
MODULE_VERSION(SAA7134_VERSION);
/* ------------------------------------------------------------------ */
static unsigned int irq_debug;
module_param(irq_debug, int, 0644);
MODULE_PARM_DESC(irq_debug,"enable debug messages [IRQ handler]");
static unsigned int core_debug;
module_param(core_debug, int, 0644);
MODULE_PARM_DESC(core_debug,"enable debug messages [core]");
static unsigned int gpio_tracking;
module_param(gpio_tracking, int, 0644);
MODULE_PARM_DESC(gpio_tracking,"enable debug messages [gpio]");
static unsigned int alsa = 1;
module_param(alsa, int, 0644);
MODULE_PARM_DESC(alsa,"enable/disable ALSA DMA sound [dmasound]");
static unsigned int latency = UNSET;
module_param(latency, int, 0444);
MODULE_PARM_DESC(latency,"pci latency timer");
int saa7134_no_overlay=-1;
module_param_named(no_overlay, saa7134_no_overlay, int, 0444);
MODULE_PARM_DESC(no_overlay,"allow override overlay default (0 disables, 1 enables)"
" [some VIA/SIS chipsets are known to have problem with overlay]");
static unsigned int video_nr[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET };
static unsigned int vbi_nr[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET };
static unsigned int radio_nr[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET };
static unsigned int tuner[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET };
static unsigned int card[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET };
module_param_array(video_nr, int, NULL, 0444);
module_param_array(vbi_nr, int, NULL, 0444);
module_param_array(radio_nr, int, NULL, 0444);
module_param_array(tuner, int, NULL, 0444);
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(video_nr, "video device number");
MODULE_PARM_DESC(vbi_nr, "vbi device number");
MODULE_PARM_DESC(radio_nr, "radio device number");
MODULE_PARM_DESC(tuner, "tuner type");
MODULE_PARM_DESC(card, "card type");
DEFINE_MUTEX(saa7134_devlist_lock);
EXPORT_SYMBOL(saa7134_devlist_lock);
LIST_HEAD(saa7134_devlist);
EXPORT_SYMBOL(saa7134_devlist);
static LIST_HEAD(mops_list);
static unsigned int saa7134_devcount;
int (*saa7134_dmasound_init)(struct saa7134_dev *dev);
int (*saa7134_dmasound_exit)(struct saa7134_dev *dev);
#define dprintk(fmt, arg...) do { \
if (core_debug) \
printk(KERN_DEBUG "%s/core: " fmt, dev->name, ## arg); \
} while (0)
void saa7134_track_gpio(struct saa7134_dev *dev, char *msg)
{
unsigned long mode,status;
if (!gpio_tracking)
return;
/* rising SAA7134_GPIO_GPRESCAN reads the status */
saa_andorb(SAA7134_GPIO_GPMODE3,SAA7134_GPIO_GPRESCAN,0);
saa_andorb(SAA7134_GPIO_GPMODE3,SAA7134_GPIO_GPRESCAN,SAA7134_GPIO_GPRESCAN);
mode = saa_readl(SAA7134_GPIO_GPMODE0 >> 2) & 0xfffffff;
status = saa_readl(SAA7134_GPIO_GPSTATUS0 >> 2) & 0xfffffff;
printk(KERN_DEBUG
"%s: gpio: mode=0x%07lx in=0x%07lx out=0x%07lx [%s]\n",
dev->name, mode, (~mode) & status, mode & status, msg);
}
void saa7134_set_gpio(struct saa7134_dev *dev, int bit_no, int value)
{
u32 index, bitval;
index = 1 << bit_no;
switch (value) {
case 0: /* static value */
case 1: dprintk("setting GPIO%d to static %d\n", bit_no, value);
/* turn sync mode off if necessary */
if (index & 0x00c00000)
saa_andorb(SAA7134_VIDEO_PORT_CTRL6, 0x0f, 0x00);
if (value)
bitval = index;
else
bitval = 0;
saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, index, index);
saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, index, bitval);
break;
case 3: /* tristate */
dprintk("setting GPIO%d to tristate\n", bit_no);
saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, index, 0);
break;
}
}
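/*
 * Illustrative usage sketch, not part of the driver: the GPIO number
 * (21) is hypothetical; the values follow the switch above (0/1 =
 * static level, 3 = tristate).
 */
#if 0
static void gpio_example(struct saa7134_dev *dev)
{
saa7134_set_gpio(dev, 21, 1); /* drive the pin high */
saa7134_set_gpio(dev, 21, 3); /* release it (tristate) */
}
#endif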
/* ------------------------------------------------------------------ */
/* ----------------------------------------------------------- */
/* delayed request_module */
#if defined(CONFIG_MODULES) && defined(MODULE)
static void request_module_async(struct work_struct *work)
{
struct saa7134_dev* dev = container_of(work, struct saa7134_dev, request_module_wk);
if (card_is_empress(dev))
request_module("saa7134-empress");
if (card_is_dvb(dev))
request_module("saa7134-dvb");
if (alsa) {
if (dev->pci->device != PCI_DEVICE_ID_PHILIPS_SAA7130)
request_module("saa7134-alsa");
}
}
static void request_submodules(struct saa7134_dev *dev)
{
INIT_WORK(&dev->request_module_wk, request_module_async);
schedule_work(&dev->request_module_wk);
}
static void flush_request_submodules(struct saa7134_dev *dev)
{
flush_work_sync(&dev->request_module_wk);
}
#else
#define request_submodules(dev)
#define flush_request_submodules(dev)
#endif /* CONFIG_MODULES */
/* ------------------------------------------------------------------ */
/* nr of (saa7134-)pages for the given buffer size */
static int saa7134_buffer_pages(int size)
{
size = PAGE_ALIGN(size);
size += PAGE_SIZE; /* for non-page-aligned buffers */
size /= 4096;
return size;
}
/* calc max # of buffers from size (must not exceed the 4MB virtual
* address space per DMA channel) */
int saa7134_buffer_count(unsigned int size, unsigned int count)
{
unsigned int maxcount;
maxcount = 1024 / saa7134_buffer_pages(size);
if (count > maxcount)
count = maxcount;
return count;
}
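/*
 * Worked example (illustrative, assumes 4 KiB pages): one PAL YUYV
 * frame is 720 * 576 * 2 = 829440 bytes. saa7134_buffer_pages() aligns
 * that up to 203 pages and adds one guard page for non-page-aligned
 * buffers, giving 204; saa7134_buffer_count() then caps the count at
 * 1024 / 204 = 5 buffers, keeping the DMA channel inside its 4MB
 * (1024-page) virtual window.
 */
#if 0
static unsigned int buffer_math_example(void)
{
/* request 32 buffers of one PAL frame each; the cap wins */
return saa7134_buffer_count(720 * 576 * 2, 32); /* == 5 */
}
#endif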
int saa7134_buffer_startpage(struct saa7134_buf *buf)
{
return saa7134_buffer_pages(buf->vb.bsize) * buf->vb.i;
}
unsigned long saa7134_buffer_base(struct saa7134_buf *buf)
{
unsigned long base;
struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
base = saa7134_buffer_startpage(buf) * 4096;
base += dma->sglist[0].offset;
return base;
}
/* ------------------------------------------------------------------ */
int saa7134_pgtable_alloc(struct pci_dev *pci, struct saa7134_pgtable *pt)
{
__le32 *cpu;
dma_addr_t dma_addr = 0;
cpu = pci_alloc_consistent(pci, SAA7134_PGTABLE_SIZE, &dma_addr);
if (NULL == cpu)
return -ENOMEM;
pt->size = SAA7134_PGTABLE_SIZE;
pt->cpu = cpu;
pt->dma = dma_addr;
return 0;
}
int saa7134_pgtable_build(struct pci_dev *pci, struct saa7134_pgtable *pt,
struct scatterlist *list, unsigned int length,
unsigned int startpage)
{
__le32 *ptr;
unsigned int i,p;
BUG_ON(NULL == pt || NULL == pt->cpu);
ptr = pt->cpu + startpage;
for (i = 0; i < length; i++, list++)
for (p = 0; p * 4096 < list->length; p++, ptr++)
*ptr = cpu_to_le32(sg_dma_address(list) - list->offset);
return 0;
}
void saa7134_pgtable_free(struct pci_dev *pci, struct saa7134_pgtable *pt)
{
if (NULL == pt->cpu)
return;
pci_free_consistent(pci, pt->size, pt->cpu, pt->dma);
pt->cpu = NULL;
}
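/*
 * Illustrative usage sketch, not part of the driver: lifetime of one
 * page table. "list"/"nents" stand for an already DMA-mapped
 * scatterlist; error handling is elided.
 */
#if 0
static int pgtable_example(struct saa7134_dev *dev,
struct scatterlist *list, unsigned int nents)
{
struct saa7134_pgtable pt;
if (saa7134_pgtable_alloc(dev->pci, &pt))
return -ENOMEM;
/* one 32-bit entry per 4 KiB page, starting at page 0 */
saa7134_pgtable_build(dev->pci, &pt, list, nents, 0);
/* ... point the DMA channel at pt.dma and run the capture ... */
saa7134_pgtable_free(dev->pci, &pt);
return 0;
}
#endif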
/* ------------------------------------------------------------------ */
void saa7134_dma_free(struct videobuf_queue *q,struct saa7134_buf *buf)
{
struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
BUG_ON(in_interrupt());
videobuf_waiton(q, &buf->vb, 0, 0);
videobuf_dma_unmap(q->dev, dma);
videobuf_dma_free(dma);
buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
/* ------------------------------------------------------------------ */
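/*
 * Queueing policy, summarized from the code below: if nothing is
 * active, a buffer is started immediately unless the queue needs two
 * buffers (q->need_two), in which case the first buffer waits on the
 * queue until a second one arrives, which is then started with the
 * queued one as its successor; if a buffer is already active, new
 * buffers are simply appended behind it.
 */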
int saa7134_buffer_queue(struct saa7134_dev *dev,
struct saa7134_dmaqueue *q,
struct saa7134_buf *buf)
{
struct saa7134_buf *next = NULL;
assert_spin_locked(&dev->slock);
dprintk("buffer_queue %p\n",buf);
if (NULL == q->curr) {
if (!q->need_two) {
q->curr = buf;
buf->activate(dev,buf,NULL);
} else if (list_empty(&q->queue)) {
list_add_tail(&buf->vb.queue,&q->queue);
buf->vb.state = VIDEOBUF_QUEUED;
} else {
next = list_entry(q->queue.next,struct saa7134_buf,
vb.queue);
q->curr = buf;
buf->activate(dev,buf,next);
}
} else {
list_add_tail(&buf->vb.queue,&q->queue);
buf->vb.state = VIDEOBUF_QUEUED;
}
return 0;
}
void saa7134_buffer_finish(struct saa7134_dev *dev,
struct saa7134_dmaqueue *q,
unsigned int state)
{
assert_spin_locked(&dev->slock);
dprintk("buffer_finish %p\n",q->curr);
/* finish current buffer */
q->curr->vb.state = state;
do_gettimeofday(&q->curr->vb.ts);
wake_up(&q->curr->vb.done);
q->curr = NULL;
}
void saa7134_buffer_next(struct saa7134_dev *dev,
struct saa7134_dmaqueue *q)
{
struct saa7134_buf *buf,*next = NULL;
assert_spin_locked(&dev->slock);
BUG_ON(NULL != q->curr);
if (!list_empty(&q->queue)) {
/* activate next one from queue */
buf = list_entry(q->queue.next,struct saa7134_buf,vb.queue);
dprintk("buffer_next %p [prev=%p/next=%p]\n",
buf,q->queue.prev,q->queue.next);
list_del(&buf->vb.queue);
if (!list_empty(&q->queue))
next = list_entry(q->queue.next,struct saa7134_buf,
vb.queue);
q->curr = buf;
buf->activate(dev,buf,next);
dprintk("buffer_next #2 prev=%p/next=%p\n",
q->queue.prev,q->queue.next);
} else {
/* nothing to do -- just stop DMA */
dprintk("buffer_next %p\n",NULL);
saa7134_set_dmabits(dev);
del_timer(&q->timeout);
if (card_has_mpeg(dev))
if (dev->ts_started)
saa7134_ts_stop(dev);
}
}
void saa7134_buffer_timeout(unsigned long data)
{
struct saa7134_dmaqueue *q = (struct saa7134_dmaqueue*)data;
struct saa7134_dev *dev = q->dev;
unsigned long flags;
spin_lock_irqsave(&dev->slock,flags);
/* try to reset the hardware (SWRST) */
saa_writeb(SAA7134_REGION_ENABLE, 0x00);
saa_writeb(SAA7134_REGION_ENABLE, 0x80);
saa_writeb(SAA7134_REGION_ENABLE, 0x00);
/* flag current buffer as failed,
try to start over with the next one. */
if (q->curr) {
dprintk("timeout on %p\n",q->curr);
saa7134_buffer_finish(dev,q,VIDEOBUF_ERROR);
}
saa7134_buffer_next(dev,q);
spin_unlock_irqrestore(&dev->slock,flags);
}
/* ------------------------------------------------------------------ */
int saa7134_set_dmabits(struct saa7134_dev *dev)
{
u32 split, task=0, ctrl=0, irq=0;
enum v4l2_field cap = V4L2_FIELD_ANY;
enum v4l2_field ov = V4L2_FIELD_ANY;
assert_spin_locked(&dev->slock);
if (dev->insuspend)
return 0;
/* video capture -- dma 0 + video task A */
if (dev->video_q.curr) {
task |= 0x01;
ctrl |= SAA7134_MAIN_CTRL_TE0;
irq |= SAA7134_IRQ1_INTE_RA0_1 |
SAA7134_IRQ1_INTE_RA0_0;
cap = dev->video_q.curr->vb.field;
}
/* video capture -- dma 1+2 (planar modes) */
if (dev->video_q.curr &&
dev->video_q.curr->fmt->planar) {
ctrl |= SAA7134_MAIN_CTRL_TE4 |
SAA7134_MAIN_CTRL_TE5;
}
/* screen overlay -- dma 0 + video task B */
if (dev->ovenable) {
task |= 0x10;
ctrl |= SAA7134_MAIN_CTRL_TE1;
ov = dev->ovfield;
}
/* vbi capture -- dma 0 + vbi task A+B */
if (dev->vbi_q.curr) {
task |= 0x22;
ctrl |= SAA7134_MAIN_CTRL_TE2 |
SAA7134_MAIN_CTRL_TE3;
irq |= SAA7134_IRQ1_INTE_RA0_7 |
SAA7134_IRQ1_INTE_RA0_6 |
SAA7134_IRQ1_INTE_RA0_5 |
SAA7134_IRQ1_INTE_RA0_4;
}
/* audio capture -- dma 3 */
if (dev->dmasound.dma_running) {
ctrl |= SAA7134_MAIN_CTRL_TE6;
irq |= SAA7134_IRQ1_INTE_RA3_1 |
SAA7134_IRQ1_INTE_RA3_0;
}
/* TS capture -- dma 5 */
if (dev->ts_q.curr) {
ctrl |= SAA7134_MAIN_CTRL_TE5;
irq |= SAA7134_IRQ1_INTE_RA2_1 |
SAA7134_IRQ1_INTE_RA2_0;
}
/* set task conditions + field handling */
if (V4L2_FIELD_HAS_BOTH(cap) || V4L2_FIELD_HAS_BOTH(ov) || cap == ov) {
/* default config -- use full frames */
saa_writeb(SAA7134_TASK_CONDITIONS(TASK_A), 0x0d);
saa_writeb(SAA7134_TASK_CONDITIONS(TASK_B), 0x0d);
saa_writeb(SAA7134_FIELD_HANDLING(TASK_A), 0x02);
saa_writeb(SAA7134_FIELD_HANDLING(TASK_B), 0x02);
split = 0;
} else {
/* split fields between tasks */
if (V4L2_FIELD_TOP == cap) {
/* odd A, even B, repeat */
saa_writeb(SAA7134_TASK_CONDITIONS(TASK_A), 0x0d);
saa_writeb(SAA7134_TASK_CONDITIONS(TASK_B), 0x0e);
} else {
/* odd B, even A, repeat */
saa_writeb(SAA7134_TASK_CONDITIONS(TASK_A), 0x0e);
saa_writeb(SAA7134_TASK_CONDITIONS(TASK_B), 0x0d);
}
saa_writeb(SAA7134_FIELD_HANDLING(TASK_A), 0x01);
saa_writeb(SAA7134_FIELD_HANDLING(TASK_B), 0x01);
split = 1;
}
/* irqs */
saa_writeb(SAA7134_REGION_ENABLE, task);
saa_writel(SAA7134_IRQ1, irq);
saa_andorl(SAA7134_MAIN_CTRL,
SAA7134_MAIN_CTRL_TE0 |
SAA7134_MAIN_CTRL_TE1 |
SAA7134_MAIN_CTRL_TE2 |
SAA7134_MAIN_CTRL_TE3 |
SAA7134_MAIN_CTRL_TE4 |
SAA7134_MAIN_CTRL_TE5 |
SAA7134_MAIN_CTRL_TE6,
ctrl);
dprintk("dmabits: task=0x%02x ctrl=0x%02x irq=0x%x split=%s\n",
task, ctrl, irq, split ? "no" : "yes");
return 0;
}
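/*
 * Summary, derived from the code above, of how the capture paths map
 * onto DMA channels and video tasks:
 *
 * video capture - DMA 0, video task A (DMA 1+2 added for planar modes)
 * screen overlay - DMA 0, video task B
 * vbi capture - DMA 0, vbi tasks A+B
 * audio capture - DMA 3
 * TS capture - DMA 5
 */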
/* ------------------------------------------------------------------ */
/* IRQ handler + helpers */
static char *irqbits[] = {
"DONE_RA0", "DONE_RA1", "DONE_RA2", "DONE_RA3",
"AR", "PE", "PWR_ON", "RDCAP", "INTL", "FIDT", "MMC",
"TRIG_ERR", "CONF_ERR", "LOAD_ERR",
"GPIO16", "GPIO18", "GPIO22", "GPIO23"
};
#define IRQBITS ARRAY_SIZE(irqbits)
static void print_irqstatus(struct saa7134_dev *dev, int loop,
unsigned long report, unsigned long status)
{
unsigned int i;
printk(KERN_DEBUG "%s/irq[%d,%ld]: r=0x%lx s=0x%02lx",
dev->name,loop,jiffies,report,status);
for (i = 0; i < IRQBITS; i++) {
if (!(report & (1 << i)))
continue;
printk(" %s",irqbits[i]);
}
if (report & SAA7134_IRQ_REPORT_DONE_RA0) {
printk(" | RA0=%s,%s,%s,%ld",
(status & 0x40) ? "vbi" : "video",
(status & 0x20) ? "b" : "a",
(status & 0x10) ? "odd" : "even",
(status & 0x0f));
}
printk("\n");
}
static irqreturn_t saa7134_irq(int irq, void *dev_id)
{
struct saa7134_dev *dev = (struct saa7134_dev*) dev_id;
unsigned long report,status;
int loop, handled = 0;
if (dev->insuspend)
goto out;
for (loop = 0; loop < 10; loop++) {
report = saa_readl(SAA7134_IRQ_REPORT);
status = saa_readl(SAA7134_IRQ_STATUS);
/* If dmasound support is active and we get a sound report,
* mask out the report and let the saa7134-alsa module deal
* with it */
if ((report & SAA7134_IRQ_REPORT_DONE_RA3) &&
(dev->dmasound.priv_data != NULL)) {
if (irq_debug > 1)
printk(KERN_DEBUG "%s/irq: preserving DMA sound interrupt\n",
dev->name);
report &= ~SAA7134_IRQ_REPORT_DONE_RA3;
}
if (0 == report) {
if (irq_debug > 1)
printk(KERN_DEBUG "%s/irq: no (more) work\n",
dev->name);
goto out;
}
handled = 1;
saa_writel(SAA7134_IRQ_REPORT,report);
if (irq_debug)
print_irqstatus(dev,loop,report,status);
if ((report & SAA7134_IRQ_REPORT_RDCAP) ||
(report & SAA7134_IRQ_REPORT_INTL))
saa7134_irq_video_signalchange(dev);
if ((report & SAA7134_IRQ_REPORT_DONE_RA0) &&
(status & 0x60) == 0)
saa7134_irq_video_done(dev,status);
if ((report & SAA7134_IRQ_REPORT_DONE_RA0) &&
(status & 0x40) == 0x40)
saa7134_irq_vbi_done(dev,status);
if ((report & SAA7134_IRQ_REPORT_DONE_RA2) &&
card_has_mpeg(dev))
saa7134_irq_ts_done(dev,status);
if (report & SAA7134_IRQ_REPORT_GPIO16) {
switch (dev->has_remote) {
case SAA7134_REMOTE_GPIO:
if (!dev->remote)
break;
if (dev->remote->mask_keydown & 0x10000) {
saa7134_input_irq(dev);
}
break;
case SAA7134_REMOTE_I2C:
break; /* FIXME: invoke I2C get_key() */
default: /* GPIO16 not used by IR remote */
break;
}
}
if (report & SAA7134_IRQ_REPORT_GPIO18) {
switch (dev->has_remote) {
case SAA7134_REMOTE_GPIO:
if (!dev->remote)
break;
if ((dev->remote->mask_keydown & 0x40000) ||
(dev->remote->mask_keyup & 0x40000)) {
saa7134_input_irq(dev);
}
break;
case SAA7134_REMOTE_I2C:
break; /* FIXME: invoke I2C get_key() */
default: /* GPIO18 not used by IR remote */
break;
}
}
}
if (10 == loop) {
print_irqstatus(dev,loop,report,status);
if (report & SAA7134_IRQ_REPORT_PE) {
/* disable all parity error */
printk(KERN_WARNING "%s/irq: looping -- "
"clearing PE (parity error!) enable bit\n",dev->name);
saa_clearl(SAA7134_IRQ2,SAA7134_IRQ2_INTE_PE);
} else if (report & SAA7134_IRQ_REPORT_GPIO16) {
/* disable gpio16 IRQ */
printk(KERN_WARNING "%s/irq: looping -- "
"clearing GPIO16 enable bit\n",dev->name);
saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16_P);
saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16_N);
} else if (report & SAA7134_IRQ_REPORT_GPIO18) {
/* disable gpio18 IRQs */
printk(KERN_WARNING "%s/irq: looping -- "
"clearing GPIO18 enable bit\n",dev->name);
saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_P);
saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_N);
} else {
/* disable all irqs */
printk(KERN_WARNING "%s/irq: looping -- "
"clearing all enable bits\n",dev->name);
saa_writel(SAA7134_IRQ1,0);
saa_writel(SAA7134_IRQ2,0);
}
}
out:
return IRQ_RETVAL(handled);
}
/* ------------------------------------------------------------------ */
/* early init (no i2c, no irq) */
static int saa7134_hw_enable1(struct saa7134_dev *dev)
{
/* RAM FIFO config */
saa_writel(SAA7134_FIFO_SIZE, 0x08070503);
saa_writel(SAA7134_THRESHOULD, 0x02020202);
/* enable audio + video processing */
saa_writel(SAA7134_MAIN_CTRL,
SAA7134_MAIN_CTRL_VPLLE |
SAA7134_MAIN_CTRL_APLLE |
SAA7134_MAIN_CTRL_EXOSC |
SAA7134_MAIN_CTRL_EVFE1 |
SAA7134_MAIN_CTRL_EVFE2 |
SAA7134_MAIN_CTRL_ESFE |
SAA7134_MAIN_CTRL_EBDAC);
/*
* Initialize OSS _after_ enabling audio clock PLL and audio processing.
* OSS initialization writes to registers via the audio DSP; these
* writes will fail unless the audio clock has been started. At worst,
* audio will not work.
*/
/* enable peripheral devices */
saa_writeb(SAA7134_SPECIAL_MODE, 0x01);
/* set vertical line numbering start (vbi needs this) */
saa_writeb(SAA7134_SOURCE_TIMING2, 0x20);
return 0;
}
static int saa7134_hwinit1(struct saa7134_dev *dev)
{
dprintk("hwinit1\n");
saa_writel(SAA7134_IRQ1, 0);
saa_writel(SAA7134_IRQ2, 0);
/* Clear any stale IRQ reports */
saa_writel(SAA7134_IRQ_REPORT, saa_readl(SAA7134_IRQ_REPORT));
mutex_init(&dev->lock);
spin_lock_init(&dev->slock);
saa7134_track_gpio(dev,"pre-init");
saa7134_video_init1(dev);
saa7134_vbi_init1(dev);
if (card_has_mpeg(dev))
saa7134_ts_init1(dev);
saa7134_input_init1(dev);
saa7134_hw_enable1(dev);
return 0;
}
/* late init (with i2c + irq) */
static int saa7134_hw_enable2(struct saa7134_dev *dev)
{
unsigned int irq2_mask;
/* enable IRQ's */
irq2_mask =
SAA7134_IRQ2_INTE_DEC3 |
SAA7134_IRQ2_INTE_DEC2 |
SAA7134_IRQ2_INTE_DEC1 |
SAA7134_IRQ2_INTE_DEC0 |
SAA7134_IRQ2_INTE_PE |
SAA7134_IRQ2_INTE_AR;
if (dev->has_remote == SAA7134_REMOTE_GPIO && dev->remote) {
if (dev->remote->mask_keydown & 0x10000)
irq2_mask |= SAA7134_IRQ2_INTE_GPIO16_N;
else { /* Allow enabling both IRQ edge triggers */
if (dev->remote->mask_keydown & 0x40000)
irq2_mask |= SAA7134_IRQ2_INTE_GPIO18_P;
if (dev->remote->mask_keyup & 0x40000)
irq2_mask |= SAA7134_IRQ2_INTE_GPIO18_N;
}
}
if (dev->has_remote == SAA7134_REMOTE_I2C) {
request_module("ir-kbd-i2c");
}
saa_writel(SAA7134_IRQ1, 0);
saa_writel(SAA7134_IRQ2, irq2_mask);
return 0;
}
static int saa7134_hwinit2(struct saa7134_dev *dev)
{
dprintk("hwinit2\n");
saa7134_video_init2(dev);
saa7134_tvaudio_init2(dev);
saa7134_hw_enable2(dev);
return 0;
}
/* shutdown */
static int saa7134_hwfini(struct saa7134_dev *dev)
{
dprintk("hwfini\n");
if (card_has_mpeg(dev))
saa7134_ts_fini(dev);
saa7134_input_fini(dev);
saa7134_vbi_fini(dev);
saa7134_tvaudio_fini(dev);
return 0;
}
static void __devinit must_configure_manually(int has_eeprom)
{
unsigned int i,p;
if (!has_eeprom)
printk(KERN_WARNING
"saa7134: <rant>\n"
"saa7134: Congratulations! Your TV card vendor saved a few\n"
"saa7134: cents for a eeprom, thus your pci board has no\n"
"saa7134: subsystem ID and I can't identify it automatically\n"
"saa7134: </rant>\n"
"saa7134: I feel better now. Ok, here are the good news:\n"
"saa7134: You can use the card=<nr> insmod option to specify\n"
"saa7134: which board do you have. The list:\n");
else
printk(KERN_WARNING
"saa7134: Board is currently unknown. You might try to use the card=<nr>\n"
"saa7134: insmod option to specify which board do you have, but this is\n"
"saa7134: somewhat risky, as might damage your card. It is better to ask\n"
"saa7134: for support at linux-media@vger.kernel.org.\n"
"saa7134: The supported cards are:\n");
for (i = 0; i < saa7134_bcount; i++) {
printk(KERN_WARNING "saa7134: card=%d -> %-40.40s",
i,saa7134_boards[i].name);
for (p = 0; saa7134_pci_tbl[p].driver_data; p++) {
if (saa7134_pci_tbl[p].driver_data != i)
continue;
printk(" %04x:%04x",
saa7134_pci_tbl[p].subvendor,
saa7134_pci_tbl[p].subdevice);
}
printk("\n");
}
}
static struct video_device *vdev_init(struct saa7134_dev *dev,
struct video_device *template,
char *type)
{
struct video_device *vfd;
vfd = video_device_alloc();
if (NULL == vfd)
return NULL;
*vfd = *template;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->release = video_device_release;
vfd->debug = video_debug;
snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)",
dev->name, type, saa7134_boards[dev->board].name);
video_set_drvdata(vfd, dev);
return vfd;
}
static void saa7134_unregister_video(struct saa7134_dev *dev)
{
if (dev->video_dev) {
if (video_is_registered(dev->video_dev))
video_unregister_device(dev->video_dev);
else
video_device_release(dev->video_dev);
dev->video_dev = NULL;
}
if (dev->vbi_dev) {
if (video_is_registered(dev->vbi_dev))
video_unregister_device(dev->vbi_dev);
else
video_device_release(dev->vbi_dev);
dev->vbi_dev = NULL;
}
if (dev->radio_dev) {
if (video_is_registered(dev->radio_dev))
video_unregister_device(dev->radio_dev);
else
video_device_release(dev->radio_dev);
dev->radio_dev = NULL;
}
}
static void mpeg_ops_attach(struct saa7134_mpeg_ops *ops,
struct saa7134_dev *dev)
{
int err;
if (NULL != dev->mops)
return;
if (saa7134_boards[dev->board].mpeg != ops->type)
return;
err = ops->init(dev);
if (0 != err)
return;
dev->mops = ops;
}
static void mpeg_ops_detach(struct saa7134_mpeg_ops *ops,
struct saa7134_dev *dev)
{
if (NULL == dev->mops)
return;
if (dev->mops != ops)
return;
dev->mops->fini(dev);
dev->mops = NULL;
}
static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
const struct pci_device_id *pci_id)
{
struct saa7134_dev *dev;
struct saa7134_mpeg_ops *mops;
int err;
if (saa7134_devcount == SAA7134_MAXBOARDS)
return -ENOMEM;
dev = kzalloc(sizeof(*dev),GFP_KERNEL);
if (NULL == dev)
return -ENOMEM;
err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
if (err)
goto fail0;
/* pci init */
dev->pci = pci_dev;
if (pci_enable_device(pci_dev)) {
err = -EIO;
goto fail1;
}
dev->nr = saa7134_devcount;
sprintf(dev->name,"saa%x[%d]",pci_dev->device,dev->nr);
/* pci quirks */
if (pci_pci_problems) {
if (pci_pci_problems & PCIPCI_TRITON)
printk(KERN_INFO "%s: quirk: PCIPCI_TRITON\n", dev->name);
if (pci_pci_problems & PCIPCI_NATOMA)
printk(KERN_INFO "%s: quirk: PCIPCI_NATOMA\n", dev->name);
if (pci_pci_problems & PCIPCI_VIAETBF)
printk(KERN_INFO "%s: quirk: PCIPCI_VIAETBF\n", dev->name);
if (pci_pci_problems & PCIPCI_VSFX)
printk(KERN_INFO "%s: quirk: PCIPCI_VSFX\n",dev->name);
#ifdef PCIPCI_ALIMAGIK
if (pci_pci_problems & PCIPCI_ALIMAGIK) {
printk(KERN_INFO "%s: quirk: PCIPCI_ALIMAGIK -- latency fixup\n",
dev->name);
latency = 0x0A;
}
#endif
if (pci_pci_problems & (PCIPCI_FAIL|PCIAGP_FAIL)) {
printk(KERN_INFO "%s: quirk: this driver and your "
"chipset may not work together"
" in overlay mode.\n",dev->name);
if (!saa7134_no_overlay) {
printk(KERN_INFO "%s: quirk: overlay "
"mode will be disabled.\n",
dev->name);
saa7134_no_overlay = 1;
} else {
printk(KERN_INFO "%s: quirk: overlay "
"mode will be forced. Use this"
" option at your own risk.\n",
dev->name);
}
}
}
if (UNSET != latency) {
printk(KERN_INFO "%s: setting pci latency timer to %d\n",
dev->name,latency);
pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
}
/* print pci info */
dev->pci_rev = pci_dev->revision;
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
printk(KERN_INFO "%s: found at %s, rev: %d, irq: %d, "
"latency: %d, mmio: 0x%llx\n", dev->name,
pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
pci_set_master(pci_dev);
if (!pci_dma_supported(pci_dev, DMA_BIT_MASK(32))) {
printk("%s: Oops: no 32bit PCI DMA ???\n",dev->name);
err = -EIO;
goto fail1;
}
/* board config */
dev->board = pci_id->driver_data;
if (card[dev->nr] >= 0 &&
card[dev->nr] < saa7134_bcount)
dev->board = card[dev->nr];
if (SAA7134_BOARD_UNKNOWN == dev->board)
must_configure_manually(0);
else if (SAA7134_BOARD_NOAUTO == dev->board) {
must_configure_manually(1);
dev->board = SAA7134_BOARD_UNKNOWN;
}
dev->autodetected = card[dev->nr] != dev->board;
dev->tuner_type = saa7134_boards[dev->board].tuner_type;
dev->tuner_addr = saa7134_boards[dev->board].tuner_addr;
dev->radio_type = saa7134_boards[dev->board].radio_type;
dev->radio_addr = saa7134_boards[dev->board].radio_addr;
dev->tda9887_conf = saa7134_boards[dev->board].tda9887_conf;
if (UNSET != tuner[dev->nr])
dev->tuner_type = tuner[dev->nr];
printk(KERN_INFO "%s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
dev->name,pci_dev->subsystem_vendor,
pci_dev->subsystem_device,saa7134_boards[dev->board].name,
dev->board, dev->autodetected ?
"autodetected" : "insmod option");
/* get mmio */
if (!request_mem_region(pci_resource_start(pci_dev,0),
pci_resource_len(pci_dev,0),
dev->name)) {
err = -EBUSY;
printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
dev->name,(unsigned long long)pci_resource_start(pci_dev,0));
goto fail1;
}
dev->lmmio = ioremap(pci_resource_start(pci_dev, 0),
pci_resource_len(pci_dev, 0));
dev->bmmio = (__u8 __iomem *)dev->lmmio;
if (NULL == dev->lmmio) {
err = -EIO;
printk(KERN_ERR "%s: can't ioremap() MMIO memory\n",
dev->name);
goto fail2;
}
/* initialize hardware #1 */
saa7134_board_init1(dev);
saa7134_hwinit1(dev);
/* get irq */
err = request_irq(pci_dev->irq, saa7134_irq,
IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
if (err < 0) {
printk(KERN_ERR "%s: can't get IRQ %d\n",
dev->name,pci_dev->irq);
goto fail3;
}
/* wait a bit, register i2c bus */
msleep(100);
saa7134_i2c_register(dev);
saa7134_board_init2(dev);
saa7134_hwinit2(dev);
/* load i2c helpers */
if (card_is_empress(dev)) {
struct v4l2_subdev *sd =
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
"saa6752hs",
saa7134_boards[dev->board].empress_addr, NULL);
if (sd)
sd->grp_id = GRP_EMPRESS;
}
if (saa7134_boards[dev->board].rds_addr) {
struct v4l2_subdev *sd;
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_adap, "saa6588",
0, I2C_ADDRS(saa7134_boards[dev->board].rds_addr));
if (sd) {
printk(KERN_INFO "%s: found RDS decoder\n", dev->name);
dev->has_rds = 1;
}
}
v4l2_prio_init(&dev->prio);
mutex_lock(&saa7134_devlist_lock);
list_for_each_entry(mops, &mops_list, next)
mpeg_ops_attach(mops, dev);
list_add_tail(&dev->devlist, &saa7134_devlist);
mutex_unlock(&saa7134_devlist_lock);
/* check for signal */
saa7134_irq_video_signalchange(dev);
if (TUNER_ABSENT != dev->tuner_type)
saa_call_all(dev, core, s_power, 0);
/* register v4l devices */
if (saa7134_no_overlay > 0)
printk(KERN_INFO "%s: Overlay support disabled.\n", dev->name);
dev->video_dev = vdev_init(dev,&saa7134_video_template,"video");
err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER,
video_nr[dev->nr]);
if (err < 0) {
printk(KERN_INFO "%s: can't register video device\n",
dev->name);
goto fail4;
}
printk(KERN_INFO "%s: registered device %s [v4l2]\n",
dev->name, video_device_node_name(dev->video_dev));
dev->vbi_dev = vdev_init(dev, &saa7134_video_template, "vbi");
err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
vbi_nr[dev->nr]);
if (err < 0)
goto fail4;
printk(KERN_INFO "%s: registered device %s\n",
dev->name, video_device_node_name(dev->vbi_dev));
if (card_has_radio(dev)) {
dev->radio_dev = vdev_init(dev,&saa7134_radio_template,"radio");
err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO,
radio_nr[dev->nr]);
if (err < 0)
goto fail4;
printk(KERN_INFO "%s: registered device %s\n",
dev->name, video_device_node_name(dev->radio_dev));
}
/* everything worked */
saa7134_devcount++;
if (saa7134_dmasound_init && !dev->dmasound.priv_data)
saa7134_dmasound_init(dev);
request_submodules(dev);
return 0;
fail4:
saa7134_unregister_video(dev);
saa7134_i2c_unregister(dev);
free_irq(pci_dev->irq, dev);
fail3:
saa7134_hwfini(dev);
iounmap(dev->lmmio);
fail2:
release_mem_region(pci_resource_start(pci_dev,0),
pci_resource_len(pci_dev,0));
fail1:
v4l2_device_unregister(&dev->v4l2_dev);
fail0:
kfree(dev);
return err;
}
static void __devexit saa7134_finidev(struct pci_dev *pci_dev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
struct saa7134_dev *dev = container_of(v4l2_dev, struct saa7134_dev, v4l2_dev);
struct saa7134_mpeg_ops *mops;
flush_request_submodules(dev);
/* Release DMA sound modules if present */
if (saa7134_dmasound_exit && dev->dmasound.priv_data) {
saa7134_dmasound_exit(dev);
}
/* debugging ... */
if (irq_debug) {
u32 report = saa_readl(SAA7134_IRQ_REPORT);
u32 status = saa_readl(SAA7134_IRQ_STATUS);
print_irqstatus(dev,42,report,status);
}
/* disable peripheral devices */
saa_writeb(SAA7134_SPECIAL_MODE,0);
/* shutdown hardware */
saa_writel(SAA7134_IRQ1,0);
saa_writel(SAA7134_IRQ2,0);
saa_writel(SAA7134_MAIN_CTRL,0);
/* shutdown subsystems */
saa7134_hwfini(dev);
/* unregister */
mutex_lock(&saa7134_devlist_lock);
list_del(&dev->devlist);
list_for_each_entry(mops, &mops_list, next)
mpeg_ops_detach(mops, dev);
mutex_unlock(&saa7134_devlist_lock);
saa7134_devcount--;
saa7134_i2c_unregister(dev);
saa7134_unregister_video(dev);
/* the DMA sound modules should be unloaded before reaching
this, but just in case they are still present... */
if (dev->dmasound.priv_data != NULL) {
free_irq(pci_dev->irq, &dev->dmasound);
dev->dmasound.priv_data = NULL;
}
/* release resources */
free_irq(pci_dev->irq, dev);
iounmap(dev->lmmio);
release_mem_region(pci_resource_start(pci_dev,0),
pci_resource_len(pci_dev,0));
v4l2_device_unregister(&dev->v4l2_dev);
/* free memory */
kfree(dev);
}
#ifdef CONFIG_PM
/* resends a current buffer in queue after resume */
static int saa7134_buffer_requeue(struct saa7134_dev *dev,
struct saa7134_dmaqueue *q)
{
struct saa7134_buf *buf, *next;
assert_spin_locked(&dev->slock);
buf = q->curr;
next = buf;
dprintk("buffer_requeue\n");
if (!buf)
return 0;
dprintk("buffer_requeue : resending active buffers \n");
if (!list_empty(&q->queue))
next = list_entry(q->queue.next, struct saa7134_buf,
vb.queue);
buf->activate(dev, buf, next);
return 0;
}
static int saa7134_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
struct saa7134_dev *dev = container_of(v4l2_dev, struct saa7134_dev, v4l2_dev);
/* disable overlay - apps should enable it explicitly on resume*/
dev->ovenable = 0;
/* Disable interrupts, DMA, and rest of the chip*/
saa_writel(SAA7134_IRQ1, 0);
saa_writel(SAA7134_IRQ2, 0);
saa_writel(SAA7134_MAIN_CTRL, 0);
dev->insuspend = 1;
synchronize_irq(pci_dev->irq);
/* ACK interrupts once more, just in case,
since the IRQ handler won't ack them anymore*/
saa_writel(SAA7134_IRQ_REPORT, saa_readl(SAA7134_IRQ_REPORT));
/* Disable timeout timers - if we have active buffers, we will
fill them on resume*/
del_timer(&dev->video_q.timeout);
del_timer(&dev->vbi_q.timeout);
del_timer(&dev->ts_q.timeout);
if (dev->remote)
saa7134_ir_stop(dev);
pci_save_state(pci_dev);
pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
return 0;
}
static int saa7134_resume(struct pci_dev *pci_dev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
struct saa7134_dev *dev = container_of(v4l2_dev, struct saa7134_dev, v4l2_dev);
unsigned long flags;
pci_set_power_state(pci_dev, PCI_D0);
pci_restore_state(pci_dev);
/* Do things that are done in saa7134_initdev,
except for initializing memory structures. */
saa7134_board_init1(dev);
/* saa7134_hwinit1 */
if (saa7134_boards[dev->board].video_out)
saa7134_videoport_init(dev);
if (card_has_mpeg(dev))
saa7134_ts_init_hw(dev);
if (dev->remote)
saa7134_ir_start(dev);
saa7134_hw_enable1(dev);
msleep(100);
saa7134_board_init2(dev);
/*saa7134_hwinit2*/
saa7134_set_tvnorm_hw(dev);
saa7134_tvaudio_setmute(dev);
saa7134_tvaudio_setvolume(dev, dev->ctl_volume);
saa7134_tvaudio_init(dev);
saa7134_enable_i2s(dev);
saa7134_hw_enable2(dev);
saa7134_irq_video_signalchange(dev);
/*resume unfinished buffer(s)*/
spin_lock_irqsave(&dev->slock, flags);
saa7134_buffer_requeue(dev, &dev->video_q);
saa7134_buffer_requeue(dev, &dev->vbi_q);
saa7134_buffer_requeue(dev, &dev->ts_q);
/* FIXME: Disable DMA audio sound - temporary till proper support
is implemented*/
dev->dmasound.dma_running = 0;
/* start DMA now*/
dev->insuspend = 0;
smp_wmb();
saa7134_set_dmabits(dev);
spin_unlock_irqrestore(&dev->slock, flags);
return 0;
}
#endif
/* ----------------------------------------------------------- */
int saa7134_ts_register(struct saa7134_mpeg_ops *ops)
{
struct saa7134_dev *dev;
mutex_lock(&saa7134_devlist_lock);
list_for_each_entry(dev, &saa7134_devlist, devlist)
mpeg_ops_attach(ops, dev);
list_add_tail(&ops->next,&mops_list);
mutex_unlock(&saa7134_devlist_lock);
return 0;
}
void saa7134_ts_unregister(struct saa7134_mpeg_ops *ops)
{
struct saa7134_dev *dev;
mutex_lock(&saa7134_devlist_lock);
list_del(&ops->next);
list_for_each_entry(dev, &saa7134_devlist, devlist)
mpeg_ops_detach(ops, dev);
mutex_unlock(&saa7134_devlist_lock);
}
EXPORT_SYMBOL(saa7134_ts_register);
EXPORT_SYMBOL(saa7134_ts_unregister);
/* ----------------------------------------------------------- */
static struct pci_driver saa7134_pci_driver = {
.name = "saa7134",
.id_table = saa7134_pci_tbl,
.probe = saa7134_initdev,
.remove = __devexit_p(saa7134_finidev),
#ifdef CONFIG_PM
.suspend = saa7134_suspend,
.resume = saa7134_resume
#endif
};
static int __init saa7134_init(void)
{
INIT_LIST_HEAD(&saa7134_devlist);
printk(KERN_INFO "saa7130/34: v4l2 driver version %s loaded\n",
SAA7134_VERSION);
return pci_register_driver(&saa7134_pci_driver);
}
static void __exit saa7134_fini(void)
{
pci_unregister_driver(&saa7134_pci_driver);
}
module_init(saa7134_init);
module_exit(saa7134_fini);
/* ----------------------------------------------------------- */
EXPORT_SYMBOL(saa7134_set_gpio);
EXPORT_SYMBOL(saa7134_boards);
/* ----------------- for the DMA sound modules --------------- */
EXPORT_SYMBOL(saa7134_dmasound_init);
EXPORT_SYMBOL(saa7134_dmasound_exit);
EXPORT_SYMBOL(saa7134_pgtable_free);
EXPORT_SYMBOL(saa7134_pgtable_build);
EXPORT_SYMBOL(saa7134_pgtable_alloc);
EXPORT_SYMBOL(saa7134_set_dmabits);
/* ----------------------------------------------------------- */
/*
* Local variables:
* c-basic-offset: 8
* End:
*/
| gpl-2.0 |
Joshndroid/kernel_samsung_lt03wifi | drivers/scsi/mesh.c | 7811 | 53907 | /*
* SCSI low-level driver for the MESH (Macintosh Enhanced SCSI Hardware)
* bus adaptor found on Power Macintosh computers.
* We assume the MESH is connected to a DBDMA (descriptor-based DMA)
* controller.
*
* Paul Mackerras, August 1996.
* Copyright (C) 1996 Paul Mackerras.
*
* Apr. 21 2002 - BenH Rework bus reset code for new error handler
* Add delay after initial bus reset
* Add module parameters
*
* Sep. 27 2003 - BenH Move to new driver model, fix some write posting
* issues
* To do:
* - handle aborts correctly
* - retry arbitration if lost (unless higher levels do this for us)
* - power down the chip when no device is detected
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/hydra.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/pci-bridge.h>
#include <asm/macio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include "mesh.h"
#if 1
#undef KERN_DEBUG
#define KERN_DEBUG KERN_WARNING
#endif
MODULE_AUTHOR("Paul Mackerras (paulus@samba.org)");
MODULE_DESCRIPTION("PowerMac MESH SCSI driver");
MODULE_LICENSE("GPL");
static int sync_rate = CONFIG_SCSI_MESH_SYNC_RATE;
static int sync_targets = 0xff;
static int resel_targets = 0xff;
static int debug_targets = 0; /* print debug for these targets */
static int init_reset_delay = CONFIG_SCSI_MESH_RESET_DELAY_MS;
module_param(sync_rate, int, 0);
MODULE_PARM_DESC(sync_rate, "Synchronous rate (0..10, 0=async)");
module_param(sync_targets, int, 0);
MODULE_PARM_DESC(sync_targets, "Bitmask of targets allowed to set synchronous");
module_param(resel_targets, int, 0);
MODULE_PARM_DESC(resel_targets, "Bitmask of targets allowed to set disconnect");
module_param(debug_targets, int, 0644);
MODULE_PARM_DESC(debug_targets, "Bitmask of debugged targets");
module_param(init_reset_delay, int, 0);
MODULE_PARM_DESC(init_reset_delay, "Initial bus reset delay (0=no reset)");
static int mesh_sync_period = 100;
static int mesh_sync_offset = 0;
static unsigned char use_active_neg = 0; /* bit mask for SEQ_ACTIVE_NEG if used */
#define ALLOW_SYNC(tgt) ((sync_targets >> (tgt)) & 1)
#define ALLOW_RESEL(tgt) ((resel_targets >> (tgt)) & 1)
#define ALLOW_DEBUG(tgt) ((debug_targets >> (tgt)) & 1)
#define DEBUG_TARGET(cmd) ((cmd) && ALLOW_DEBUG((cmd)->device->id))
#undef MESH_DBG
#define N_DBG_LOG 50
#define N_DBG_SLOG 20
#define NUM_DBG_EVENTS 13
#undef DBG_USE_TB /* bombs on 601 */
struct dbglog {
char *fmt;
u32 tb;
u8 phase;
u8 bs0;
u8 bs1;
u8 tgt;
int d;
};
enum mesh_phase {
idle,
arbitrating,
selecting,
commanding,
dataing,
statusing,
busfreeing,
disconnecting,
reselecting,
sleeping
};
enum msg_phase {
msg_none,
msg_out,
msg_out_xxx,
msg_out_last,
msg_in,
msg_in_bad,
};
enum sdtr_phase {
do_sdtr,
sdtr_sent,
sdtr_done
};
struct mesh_target {
enum sdtr_phase sdtr_state;
int sync_params;
int data_goes_out; /* guess as to data direction */
struct scsi_cmnd *current_req;
u32 saved_ptr;
#ifdef MESH_DBG
int log_ix;
int n_log;
struct dbglog log[N_DBG_LOG];
#endif
};
struct mesh_state {
volatile struct mesh_regs __iomem *mesh;
int meshintr;
volatile struct dbdma_regs __iomem *dma;
int dmaintr;
struct Scsi_Host *host;
struct mesh_state *next;
struct scsi_cmnd *request_q;
struct scsi_cmnd *request_qtail;
enum mesh_phase phase; /* what we're currently trying to do */
enum msg_phase msgphase;
int conn_tgt; /* target we're connected to */
struct scsi_cmnd *current_req; /* req we're currently working on */
int data_ptr;
int dma_started;
int dma_count;
int stat;
int aborting;
int expect_reply;
int n_msgin;
u8 msgin[16];
int n_msgout;
int last_n_msgout;
u8 msgout[16];
struct dbdma_cmd *dma_cmds; /* space for dbdma commands, aligned */
dma_addr_t dma_cmd_bus;
void *dma_cmd_space;
int dma_cmd_size;
int clk_freq;
struct mesh_target tgts[8];
struct macio_dev *mdev;
struct pci_dev* pdev;
#ifdef MESH_DBG
int log_ix;
int n_log;
struct dbglog log[N_DBG_SLOG];
#endif
};
/*
* Driver is too messy, we need a few prototypes...
*/
static void mesh_done(struct mesh_state *ms, int start_next);
static void mesh_interrupt(struct mesh_state *ms);
static void cmd_complete(struct mesh_state *ms);
static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd);
static void halt_dma(struct mesh_state *ms);
static void phase_mismatch(struct mesh_state *ms);
/*
* Some debugging & logging routines
*/
#ifdef MESH_DBG
static inline u32 readtb(void)
{
u32 tb;
#ifdef DBG_USE_TB
/* Beware: if you enable this, it will crash on 601s. */
asm ("mftb %0" : "=r" (tb) : );
#else
tb = 0;
#endif
return tb;
}
static void dlog(struct mesh_state *ms, char *fmt, int a)
{
struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
struct dbglog *tlp, *slp;
tlp = &tp->log[tp->log_ix];
slp = &ms->log[ms->log_ix];
tlp->fmt = fmt;
tlp->tb = readtb();
tlp->phase = (ms->msgphase << 4) + ms->phase;
tlp->bs0 = ms->mesh->bus_status0;
tlp->bs1 = ms->mesh->bus_status1;
tlp->tgt = ms->conn_tgt;
tlp->d = a;
*slp = *tlp;
if (++tp->log_ix >= N_DBG_LOG)
tp->log_ix = 0;
if (tp->n_log < N_DBG_LOG)
++tp->n_log;
if (++ms->log_ix >= N_DBG_SLOG)
ms->log_ix = 0;
if (ms->n_log < N_DBG_SLOG)
++ms->n_log;
}
static void dumplog(struct mesh_state *ms, int t)
{
struct mesh_target *tp = &ms->tgts[t];
struct dbglog *lp;
int i;
if (tp->n_log == 0)
return;
i = tp->log_ix - tp->n_log;
if (i < 0)
i += N_DBG_LOG;
tp->n_log = 0;
do {
lp = &tp->log[i];
printk(KERN_DEBUG "mesh log %d: bs=%.2x%.2x ph=%.2x ",
t, lp->bs1, lp->bs0, lp->phase);
#ifdef DBG_USE_TB
printk("tb=%10u ", lp->tb);
#endif
printk(lp->fmt, lp->d);
printk("\n");
if (++i >= N_DBG_LOG)
i = 0;
} while (i != tp->log_ix);
}
static void dumpslog(struct mesh_state *ms)
{
struct dbglog *lp;
int i;
if (ms->n_log == 0)
return;
i = ms->log_ix - ms->n_log;
if (i < 0)
i += N_DBG_SLOG;
ms->n_log = 0;
do {
lp = &ms->log[i];
printk(KERN_DEBUG "mesh log: bs=%.2x%.2x ph=%.2x t%d ",
lp->bs1, lp->bs0, lp->phase, lp->tgt);
#ifdef DBG_USE_TB
printk("tb=%10u ", lp->tb);
#endif
printk(lp->fmt, lp->d);
printk("\n");
if (++i >= N_DBG_SLOG)
i = 0;
} while (i != ms->log_ix);
}
#else
static inline void dlog(struct mesh_state *ms, char *fmt, int a)
{}
static inline void dumplog(struct mesh_state *ms, int tgt)
{}
static inline void dumpslog(struct mesh_state *ms)
{}
#endif /* MESH_DBG */
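/*
 * Illustrative sketch, not part of the driver: the circular-log index
 * arithmetic used by dumplog()/dumpslog() above. log_ix is the next
 * slot to be written, so the oldest of the n_log valid entries sits
 * n_log slots behind it, modulo the ring size.
 */
#if 0
static int oldest_log_entry(int log_ix, int n_log, int ring_size)
{
int i = log_ix - n_log;
if (i < 0)
i += ring_size;
return i;
}
#endif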
#define MKWORD(a, b, c, d) (((a) << 24) + ((b) << 16) + ((c) << 8) + (d))
static void
mesh_dump_regs(struct mesh_state *ms)
{
volatile struct mesh_regs __iomem *mr = ms->mesh;
volatile struct dbdma_regs __iomem *md = ms->dma;
int t;
struct mesh_target *tp;
printk(KERN_DEBUG "mesh: state at %p, regs at %p, dma at %p\n",
ms, mr, md);
printk(KERN_DEBUG " ct=%4x seq=%2x bs=%4x fc=%2x "
"exc=%2x err=%2x im=%2x int=%2x sp=%2x\n",
(mr->count_hi << 8) + mr->count_lo, mr->sequence,
(mr->bus_status1 << 8) + mr->bus_status0, mr->fifo_count,
mr->exception, mr->error, mr->intr_mask, mr->interrupt,
mr->sync_params);
while(in_8(&mr->fifo_count))
printk(KERN_DEBUG " fifo data=%.2x\n",in_8(&mr->fifo));
printk(KERN_DEBUG " dma stat=%x cmdptr=%x\n",
in_le32(&md->status), in_le32(&md->cmdptr));
printk(KERN_DEBUG " phase=%d msgphase=%d conn_tgt=%d data_ptr=%d\n",
ms->phase, ms->msgphase, ms->conn_tgt, ms->data_ptr);
printk(KERN_DEBUG " dma_st=%d dma_ct=%d n_msgout=%d\n",
ms->dma_started, ms->dma_count, ms->n_msgout);
for (t = 0; t < 8; ++t) {
tp = &ms->tgts[t];
if (tp->current_req == NULL)
continue;
printk(KERN_DEBUG " target %d: req=%p goes_out=%d saved_ptr=%d\n",
t, tp->current_req, tp->data_goes_out, tp->saved_ptr);
}
}
/*
* Flush write buffers on the bus path to the mesh
*/
static inline void mesh_flush_io(volatile struct mesh_regs __iomem *mr)
{
(void)in_8(&mr->mesh_id);
}
/*
* Complete a SCSI command
*/
static void mesh_completed(struct mesh_state *ms, struct scsi_cmnd *cmd)
{
(*cmd->scsi_done)(cmd);
}
/* Called with meshinterrupt disabled, initialize the chipset
* and eventually do the initial bus reset. The lock must not be
* held since we can schedule.
*/
static void mesh_init(struct mesh_state *ms)
{
volatile struct mesh_regs __iomem *mr = ms->mesh;
volatile struct dbdma_regs __iomem *md = ms->dma;
mesh_flush_io(mr);
udelay(100);
/* Reset controller */
out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */
out_8(&mr->exception, 0xff); /* clear all exception bits */
out_8(&mr->error, 0xff); /* clear all error bits */
out_8(&mr->sequence, SEQ_RESETMESH);
mesh_flush_io(mr);
udelay(10);
out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
out_8(&mr->source_id, ms->host->this_id);
out_8(&mr->sel_timeout, 25); /* 250ms */
out_8(&mr->sync_params, ASYNC_PARAMS);
if (init_reset_delay) {
printk(KERN_INFO "mesh: performing initial bus reset...\n");
/* Reset bus */
out_8(&mr->bus_status1, BS1_RST); /* assert RST */
mesh_flush_io(mr);
udelay(30); /* leave it on for >= 25us */
out_8(&mr->bus_status1, 0); /* negate RST */
mesh_flush_io(mr);
/* Wait for bus to come back */
msleep(init_reset_delay);
}
/* Reconfigure controller */
out_8(&mr->interrupt, 0xff); /* clear all interrupt bits */
out_8(&mr->sequence, SEQ_FLUSHFIFO);
mesh_flush_io(mr);
udelay(1);
out_8(&mr->sync_params, ASYNC_PARAMS);
out_8(&mr->sequence, SEQ_ENBRESEL);
ms->phase = idle;
ms->msgphase = msg_none;
}
static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd)
{
volatile struct mesh_regs __iomem *mr = ms->mesh;
int t, id;
id = cmd->device->id;
ms->current_req = cmd;
ms->tgts[id].data_goes_out = cmd->sc_data_direction == DMA_TO_DEVICE;
ms->tgts[id].current_req = cmd;
#if 1
if (DEBUG_TARGET(cmd)) {
int i;
printk(KERN_DEBUG "mesh_start: %p tgt=%d cmd=", cmd, id);
for (i = 0; i < cmd->cmd_len; ++i)
printk(" %x", cmd->cmnd[i]);
printk(" use_sg=%d buffer=%p bufflen=%u\n",
scsi_sg_count(cmd), scsi_sglist(cmd), scsi_bufflen(cmd));
}
#endif
if (ms->dma_started)
panic("mesh: double DMA start !\n");
ms->phase = arbitrating;
ms->msgphase = msg_none;
ms->data_ptr = 0;
ms->dma_started = 0;
ms->n_msgout = 0;
ms->last_n_msgout = 0;
ms->expect_reply = 0;
ms->conn_tgt = id;
ms->tgts[id].saved_ptr = 0;
ms->stat = DID_OK;
ms->aborting = 0;
#ifdef MESH_DBG
ms->tgts[id].n_log = 0;
dlog(ms, "start cmd=%x", (int) cmd);
#endif
/* Off we go */
dlog(ms, "about to arb, intr/exc/err/fc=%.8x",
MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
out_8(&mr->interrupt, INT_CMDDONE);
out_8(&mr->sequence, SEQ_ENBRESEL);
mesh_flush_io(mr);
udelay(1);
if (in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) {
/*
* Some other device has the bus or is arbitrating for it -
* probably a target which is about to reselect us.
*/
dlog(ms, "busy b4 arb, intr/exc/err/fc=%.8x",
MKWORD(mr->interrupt, mr->exception,
mr->error, mr->fifo_count));
for (t = 100; t > 0; --t) {
if ((in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) == 0)
break;
if (in_8(&mr->interrupt) != 0) {
dlog(ms, "intr b4 arb, intr/exc/err/fc=%.8x",
MKWORD(mr->interrupt, mr->exception,
mr->error, mr->fifo_count));
mesh_interrupt(ms);
if (ms->phase != arbitrating)
return;
}
udelay(1);
}
if (in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) {
/* XXX should try again in a little while */
ms->stat = DID_BUS_BUSY;
ms->phase = idle;
mesh_done(ms, 0);
return;
}
}
/*
* Apparently the mesh has a bug where it will assert both its
* own bit and the target's bit on the bus during arbitration.
*/
out_8(&mr->dest_id, mr->source_id);
/*
* There appears to be a race with reselection sometimes,
* where a target reselects us just as we issue the
* arbitrate command. It seems that then the arbitrate
* command just hangs waiting for the bus to be free
* without giving us a reselection exception.
* The only way I have found to get it to respond correctly
* is this: disable reselection before issuing the arbitrate
* command, then after issuing it, if it looks like a target
* is trying to reselect us, reset the mesh and then enable
* reselection.
*/
out_8(&mr->sequence, SEQ_DISRESEL);
if (in_8(&mr->interrupt) != 0) {
dlog(ms, "intr after disresel, intr/exc/err/fc=%.8x",
MKWORD(mr->interrupt, mr->exception,
mr->error, mr->fifo_count));
mesh_interrupt(ms);
if (ms->phase != arbitrating)
return;
dlog(ms, "after intr after disresel, intr/exc/err/fc=%.8x",
MKWORD(mr->interrupt, mr->exception,
mr->error, mr->fifo_count));
}
out_8(&mr->sequence, SEQ_ARBITRATE);
for (t = 230; t > 0; --t) {
if (in_8(&mr->interrupt) != 0)
break;
udelay(1);
}
dlog(ms, "after arb, intr/exc/err/fc=%.8x",
MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
if (in_8(&mr->interrupt) == 0 && (in_8(&mr->bus_status1) & BS1_SEL)
&& (in_8(&mr->bus_status0) & BS0_IO)) {
/* looks like a reselection - try resetting the mesh */
dlog(ms, "resel? after arb, intr/exc/err/fc=%.8x",
MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
out_8(&mr->sequence, SEQ_RESETMESH);
mesh_flush_io(mr);
udelay(10);
out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
out_8(&mr->sequence, SEQ_ENBRESEL);
mesh_flush_io(mr);
for (t = 10; t > 0 && in_8(&mr->interrupt) == 0; --t)
udelay(1);
dlog(ms, "tried reset after arb, intr/exc/err/fc=%.8x",
MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
#ifndef MESH_MULTIPLE_HOSTS
if (in_8(&mr->interrupt) == 0 && (in_8(&mr->bus_status1) & BS1_SEL)
&& (in_8(&mr->bus_status0) & BS0_IO)) {
printk(KERN_ERR "mesh: controller not responding"
" to reselection!\n");
/*
* If this is a target reselecting us, and the
* mesh isn't responding, the higher levels of
* the scsi code will eventually time out and
* reset the bus.
*/
}
#endif
}
}
/*
* Start the next command for a MESH.
* Should be called with interrupts disabled.
*/
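/*
* The request queue is a singly-linked list threaded through
* cmd->host_scribble, with request_q/request_qtail as head and tail;
* the scan below picks the first queued command whose target has no
* command currently outstanding.
*/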
static void mesh_start(struct mesh_state *ms)
{
struct scsi_cmnd *cmd, *prev, *next;
if (ms->phase != idle || ms->current_req != NULL) {
printk(KERN_ERR "inappropriate mesh_start (phase=%d, ms=%p)",
ms->phase, ms);
return;
}
while (ms->phase == idle) {
prev = NULL;
for (cmd = ms->request_q; ; cmd = (struct scsi_cmnd *) cmd->host_scribble) {
if (cmd == NULL)
return;
if (ms->tgts[cmd->device->id].current_req == NULL)
break;
prev = cmd;
}
next = (struct scsi_cmnd *) cmd->host_scribble;
if (prev == NULL)
ms->request_q = next;
else
prev->host_scribble = (void *) next;
if (next == NULL)
ms->request_qtail = prev;
mesh_start_cmd(ms, cmd);
}
}
static void mesh_done(struct mesh_state *ms, int start_next)
{
struct scsi_cmnd *cmd;
struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
cmd = ms->current_req;
ms->current_req = NULL;
tp->current_req = NULL;
if (cmd) {
cmd->result = (ms->stat << 16) + cmd->SCp.Status;
if (ms->stat == DID_OK)
cmd->result += (cmd->SCp.Message << 8);
if (DEBUG_TARGET(cmd)) {
printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n",
cmd->result, ms->data_ptr, scsi_bufflen(cmd));
#if 0
/* needs to use sg? */
if ((cmd->cmnd[0] == 0 || cmd->cmnd[0] == 0x12 || cmd->cmnd[0] == 3)
&& cmd->request_buffer != 0) {
unsigned char *b = cmd->request_buffer;
printk(KERN_DEBUG "buffer = %x %x %x %x %x %x %x %x\n",
b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
}
#endif
}
cmd->SCp.this_residual -= ms->data_ptr;
mesh_completed(ms, cmd);
}
if (start_next) {
out_8(&ms->mesh->sequence, SEQ_ENBRESEL);
mesh_flush_io(ms->mesh);
udelay(1);
ms->phase = idle;
mesh_start(ms);
}
}
static inline void add_sdtr_msg(struct mesh_state *ms)
{
int i = ms->n_msgout;
ms->msgout[i] = EXTENDED_MESSAGE;
ms->msgout[i+1] = 3;
ms->msgout[i+2] = EXTENDED_SDTR;
ms->msgout[i+3] = mesh_sync_period/4;
ms->msgout[i+4] = (ALLOW_SYNC(ms->conn_tgt)? mesh_sync_offset: 0);
ms->n_msgout = i + 5;
}
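/*
* The five bytes queued by add_sdtr_msg() form a standard SCSI SDTR
* message: { EXTENDED_MESSAGE, 3, EXTENDED_SDTR, period/4ns, offset }.
* E.g. with mesh_sync_period = 200 and mesh_sync_offset = 15 the
* message is { 0x01, 0x03, 0x01, 50, 15 }, offering 200ns / 5 MB/s.
*/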
static void set_sdtr(struct mesh_state *ms, int period, int offset)
{
struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
volatile struct mesh_regs __iomem *mr = ms->mesh;
int v, tr;
tp->sdtr_state = sdtr_done;
if (offset == 0) {
/* asynchronous */
if (SYNC_OFF(tp->sync_params))
printk(KERN_INFO "mesh: target %d now asynchronous\n",
ms->conn_tgt);
tp->sync_params = ASYNC_PARAMS;
out_8(&mr->sync_params, ASYNC_PARAMS);
return;
}
/*
* We need to compute ceil(clk_freq * period / 500e6) - 2
* without incurring overflow.
*/
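/*
* Worked example (illustrative): clk_freq = 50000000 and a period
* byte of 50 (200ns) give v = (50000000/5000) * 50 = 500000, which
* is > 250000, so v = (500000 + 99999)/100000 - 2 = 3 and
* tr = ((50000000/(3+2)) + 199999)/200000 = 50, printed as "5.0 MB/s".
*/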
v = (ms->clk_freq / 5000) * period;
if (v <= 250000) {
/* special case: sync_period == 5 * clk_period */
v = 0;
/* units of tr are 100kB/s */
tr = (ms->clk_freq + 250000) / 500000;
} else {
/* sync_period == (v + 2) * 2 * clk_period */
v = (v + 99999) / 100000 - 2;
if (v > 15)
v = 15; /* oops */
tr = ((ms->clk_freq / (v + 2)) + 199999) / 200000;
}
if (offset > 15)
offset = 15; /* can't happen */
tp->sync_params = SYNC_PARAMS(offset, v);
out_8(&mr->sync_params, tp->sync_params);
printk(KERN_INFO "mesh: target %d synchronous at %d.%d MB/s\n",
ms->conn_tgt, tr/10, tr%10);
}
static void start_phase(struct mesh_state *ms)
{
int i, seq, nb;
volatile struct mesh_regs __iomem *mr = ms->mesh;
volatile struct dbdma_regs __iomem *md = ms->dma;
struct scsi_cmnd *cmd = ms->current_req;
struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
dlog(ms, "start_phase nmo/exc/fc/seq = %.8x",
MKWORD(ms->n_msgout, mr->exception, mr->fifo_count, mr->sequence));
out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0);
switch (ms->msgphase) {
case msg_none:
break;
case msg_in:
out_8(&mr->count_hi, 0);
out_8(&mr->count_lo, 1);
out_8(&mr->sequence, SEQ_MSGIN + seq);
ms->n_msgin = 0;
return;
case msg_out:
/*
* To make sure ATN drops before we assert ACK for
* the last byte of the message, we have to do the
* last byte specially.
*/
if (ms->n_msgout <= 0) {
printk(KERN_ERR "mesh: msg_out but n_msgout=%d\n",
ms->n_msgout);
mesh_dump_regs(ms);
ms->msgphase = msg_none;
break;
}
if (ALLOW_DEBUG(ms->conn_tgt)) {
printk(KERN_DEBUG "mesh: sending %d msg bytes:",
ms->n_msgout);
for (i = 0; i < ms->n_msgout; ++i)
printk(" %x", ms->msgout[i]);
printk("\n");
}
dlog(ms, "msgout msg=%.8x", MKWORD(ms->n_msgout, ms->msgout[0],
ms->msgout[1], ms->msgout[2]));
out_8(&mr->count_hi, 0);
out_8(&mr->sequence, SEQ_FLUSHFIFO);
mesh_flush_io(mr);
udelay(1);
/*
* If ATN is not already asserted, we assert it, then
* issue a SEQ_MSGOUT to get the mesh to drop ACK.
*/
if ((in_8(&mr->bus_status0) & BS0_ATN) == 0) {
dlog(ms, "bus0 was %.2x explicitly asserting ATN", mr->bus_status0);
out_8(&mr->bus_status0, BS0_ATN); /* explicit ATN */
mesh_flush_io(mr);
udelay(1);
out_8(&mr->count_lo, 1);
out_8(&mr->sequence, SEQ_MSGOUT + seq);
out_8(&mr->bus_status0, 0); /* release explicit ATN */
dlog(ms,"hace: after explicit ATN bus0=%.2x",mr->bus_status0);
}
if (ms->n_msgout == 1) {
/*
* We can't issue the SEQ_MSGOUT without ATN
* until the target has asserted REQ. The logic
* in cmd_complete handles both situations:
* REQ already asserted or not.
*/
cmd_complete(ms);
} else {
out_8(&mr->count_lo, ms->n_msgout - 1);
out_8(&mr->sequence, SEQ_MSGOUT + seq);
for (i = 0; i < ms->n_msgout - 1; ++i)
out_8(&mr->fifo, ms->msgout[i]);
}
return;
default:
printk(KERN_ERR "mesh bug: start_phase msgphase=%d\n",
ms->msgphase);
}
switch (ms->phase) {
case selecting:
out_8(&mr->dest_id, ms->conn_tgt);
out_8(&mr->sequence, SEQ_SELECT + SEQ_ATN);
break;
case commanding:
out_8(&mr->sync_params, tp->sync_params);
out_8(&mr->count_hi, 0);
if (cmd) {
out_8(&mr->count_lo, cmd->cmd_len);
out_8(&mr->sequence, SEQ_COMMAND + seq);
for (i = 0; i < cmd->cmd_len; ++i)
out_8(&mr->fifo, cmd->cmnd[i]);
} else {
out_8(&mr->count_lo, 6);
out_8(&mr->sequence, SEQ_COMMAND + seq);
for (i = 0; i < 6; ++i)
out_8(&mr->fifo, 0);
}
break;
case dataing:
/* transfer data, if any */
if (!ms->dma_started) {
set_dma_cmds(ms, cmd);
out_le32(&md->cmdptr, virt_to_phys(ms->dma_cmds));
out_le32(&md->control, (RUN << 16) | RUN);
ms->dma_started = 1;
}
nb = ms->dma_count;
if (nb > 0xfff0)
nb = 0xfff0;
ms->dma_count -= nb;
ms->data_ptr += nb;
out_8(&mr->count_lo, nb);
out_8(&mr->count_hi, nb >> 8);
out_8(&mr->sequence, (tp->data_goes_out?
SEQ_DATAOUT: SEQ_DATAIN) + SEQ_DMA_MODE + seq);
break;
case statusing:
out_8(&mr->count_hi, 0);
out_8(&mr->count_lo, 1);
out_8(&mr->sequence, SEQ_STATUS + seq);
break;
case busfreeing:
case disconnecting:
out_8(&mr->sequence, SEQ_ENBRESEL);
mesh_flush_io(mr);
udelay(1);
dlog(ms, "enbresel intr/exc/err/fc=%.8x",
MKWORD(mr->interrupt, mr->exception, mr->error,
mr->fifo_count));
out_8(&mr->sequence, SEQ_BUSFREE);
break;
default:
printk(KERN_ERR "mesh: start_phase called with phase=%d\n",
ms->phase);
dumpslog(ms);
}
}
static inline void get_msgin(struct mesh_state *ms)
{
volatile struct mesh_regs __iomem *mr = ms->mesh;
int i, n;
n = mr->fifo_count;
if (n != 0) {
i = ms->n_msgin;
ms->n_msgin = i + n;
for (; n > 0; --n)
ms->msgin[i++] = in_8(&mr->fifo);
}
}
static inline int msgin_length(struct mesh_state *ms)
{
int b, n;
n = 1;
if (ms->n_msgin > 0) {
b = ms->msgin[0];
if (b == 1) {
/* extended message */
n = ms->n_msgin < 2? 2: ms->msgin[1] + 2;
} else if (0x20 <= b && b <= 0x2f) {
/* 2-byte message */
n = 2;
}
}
return n;
}
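/*
* Per the SCSI message protocol: single-byte messages (e.g. COMMAND
* COMPLETE, 0x00) have length 1, codes 0x20-0x2f are two-byte
* messages, and an extended message (0x01) occupies msgin[1] + 2
* bytes, e.g. 5 bytes for an SDTR whose length byte is 3.
*/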
static void reselected(struct mesh_state *ms)
{
volatile struct mesh_regs __iomem *mr = ms->mesh;
struct scsi_cmnd *cmd;
struct mesh_target *tp;
int b, t, prev;
switch (ms->phase) {
case idle:
break;
case arbitrating:
if ((cmd = ms->current_req) != NULL) {
/* put the command back on the queue */
cmd->host_scribble = (void *) ms->request_q;
if (ms->request_q == NULL)
ms->request_qtail = cmd;
ms->request_q = cmd;
tp = &ms->tgts[cmd->device->id];
tp->current_req = NULL;
}
break;
case busfreeing:
ms->phase = reselecting;
mesh_done(ms, 0);
break;
case disconnecting:
break;
default:
printk(KERN_ERR "mesh: reselected in phase %d/%d tgt %d\n",
ms->msgphase, ms->phase, ms->conn_tgt);
dumplog(ms, ms->conn_tgt);
dumpslog(ms);
}
if (ms->dma_started) {
printk(KERN_ERR "mesh: reselected with DMA started !\n");
halt_dma(ms);
}
ms->current_req = NULL;
ms->phase = dataing;
ms->msgphase = msg_in;
ms->n_msgout = 0;
ms->last_n_msgout = 0;
prev = ms->conn_tgt;
/*
* We seem to get abortive reselections sometimes.
*/
while ((in_8(&mr->bus_status1) & BS1_BSY) == 0) {
static int mesh_aborted_resels;
mesh_aborted_resels++;
out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
mesh_flush_io(mr);
udelay(1);
out_8(&mr->sequence, SEQ_ENBRESEL);
mesh_flush_io(mr);
udelay(5);
dlog(ms, "extra resel err/exc/fc = %.6x",
MKWORD(0, mr->error, mr->exception, mr->fifo_count));
}
out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
mesh_flush_io(mr);
udelay(1);
out_8(&mr->sequence, SEQ_ENBRESEL);
mesh_flush_io(mr);
udelay(1);
out_8(&mr->sync_params, ASYNC_PARAMS);
/*
* Find out who reselected us.
*/
if (in_8(&mr->fifo_count) == 0) {
printk(KERN_ERR "mesh: reselection but nothing in fifo?\n");
ms->conn_tgt = ms->host->this_id;
goto bogus;
}
/* get the last byte in the fifo */
do {
b = in_8(&mr->fifo);
dlog(ms, "reseldata %x", b);
} while (in_8(&mr->fifo_count));
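/*
* During reselection the data bus carries exactly two ID bits: our
* own and the reselecting target's. E.g. with this_id = 7, a
* reselection by target 2 puts 0x84 on the bus; the loop below picks
* out the target bit and the check after it rejects anything else.
*/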
for (t = 0; t < 8; ++t)
if ((b & (1 << t)) != 0 && t != ms->host->this_id)
break;
if (b != (1 << t) + (1 << ms->host->this_id)) {
printk(KERN_ERR "mesh: bad reselection data %x\n", b);
ms->conn_tgt = ms->host->this_id;
goto bogus;
}
/*
* Set up to continue with that target's transfer.
*/
ms->conn_tgt = t;
tp = &ms->tgts[t];
out_8(&mr->sync_params, tp->sync_params);
if (ALLOW_DEBUG(t)) {
printk(KERN_DEBUG "mesh: reselected by target %d\n", t);
printk(KERN_DEBUG "mesh: saved_ptr=%x goes_out=%d cmd=%p\n",
tp->saved_ptr, tp->data_goes_out, tp->current_req);
}
ms->current_req = tp->current_req;
if (tp->current_req == NULL) {
printk(KERN_ERR "mesh: reselected by tgt %d but no cmd!\n", t);
goto bogus;
}
ms->data_ptr = tp->saved_ptr;
dlog(ms, "resel prev tgt=%d", prev);
dlog(ms, "resel err/exc=%.4x", MKWORD(0, 0, mr->error, mr->exception));
start_phase(ms);
return;
bogus:
dumplog(ms, ms->conn_tgt);
dumpslog(ms);
ms->data_ptr = 0;
ms->aborting = 1;
start_phase(ms);
}
static void do_abort(struct mesh_state *ms)
{
ms->msgout[0] = ABORT;
ms->n_msgout = 1;
ms->aborting = 1;
ms->stat = DID_ABORT;
dlog(ms, "abort", 0);
}
static void handle_reset(struct mesh_state *ms)
{
int tgt;
struct mesh_target *tp;
struct scsi_cmnd *cmd;
volatile struct mesh_regs __iomem *mr = ms->mesh;
for (tgt = 0; tgt < 8; ++tgt) {
tp = &ms->tgts[tgt];
if ((cmd = tp->current_req) != NULL) {
cmd->result = DID_RESET << 16;
tp->current_req = NULL;
mesh_completed(ms, cmd);
}
ms->tgts[tgt].sdtr_state = do_sdtr;
ms->tgts[tgt].sync_params = ASYNC_PARAMS;
}
ms->current_req = NULL;
while ((cmd = ms->request_q) != NULL) {
ms->request_q = (struct scsi_cmnd *) cmd->host_scribble;
cmd->result = DID_RESET << 16;
mesh_completed(ms, cmd);
}
ms->phase = idle;
ms->msgphase = msg_none;
out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
out_8(&mr->sequence, SEQ_FLUSHFIFO);
mesh_flush_io(mr);
udelay(1);
out_8(&mr->sync_params, ASYNC_PARAMS);
out_8(&mr->sequence, SEQ_ENBRESEL);
}
static irqreturn_t do_mesh_interrupt(int irq, void *dev_id)
{
unsigned long flags;
struct mesh_state *ms = dev_id;
struct Scsi_Host *dev = ms->host;
spin_lock_irqsave(dev->host_lock, flags);
mesh_interrupt(ms);
spin_unlock_irqrestore(dev->host_lock, flags);
return IRQ_HANDLED;
}
static void handle_error(struct mesh_state *ms)
{
int err, exc, count;
volatile struct mesh_regs __iomem *mr = ms->mesh;
err = in_8(&mr->error);
exc = in_8(&mr->exception);
out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
dlog(ms, "error err/exc/fc/cl=%.8x",
MKWORD(err, exc, mr->fifo_count, mr->count_lo));
if (err & ERR_SCSIRESET) {
/* SCSI bus was reset */
printk(KERN_INFO "mesh: SCSI bus reset detected: "
"waiting for end...");
while ((in_8(&mr->bus_status1) & BS1_RST) != 0)
udelay(1);
printk("done\n");
handle_reset(ms);
/* request_q is empty, no point in mesh_start() */
return;
}
if (err & ERR_UNEXPDISC) {
/* Unexpected disconnect */
if (exc & EXC_RESELECTED) {
reselected(ms);
return;
}
if (!ms->aborting) {
printk(KERN_WARNING "mesh: target %d aborted\n",
ms->conn_tgt);
dumplog(ms, ms->conn_tgt);
dumpslog(ms);
}
out_8(&mr->interrupt, INT_CMDDONE);
ms->stat = DID_ABORT;
mesh_done(ms, 1);
return;
}
if (err & ERR_PARITY) {
if (ms->msgphase == msg_in) {
printk(KERN_ERR "mesh: msg parity error, target %d\n",
ms->conn_tgt);
ms->msgout[0] = MSG_PARITY_ERROR;
ms->n_msgout = 1;
ms->msgphase = msg_in_bad;
cmd_complete(ms);
return;
}
if (ms->stat == DID_OK) {
printk(KERN_ERR "mesh: parity error, target %d\n",
ms->conn_tgt);
ms->stat = DID_PARITY;
}
count = (mr->count_hi << 8) + mr->count_lo;
if (count == 0) {
cmd_complete(ms);
} else {
/* reissue the data transfer command */
out_8(&mr->sequence, mr->sequence);
}
return;
}
if (err & ERR_SEQERR) {
if (exc & EXC_RESELECTED) {
/* This can happen if we issue a command to
get the bus just after the target reselects us. */
static int mesh_resel_seqerr;
mesh_resel_seqerr++;
reselected(ms);
return;
}
if (exc == EXC_PHASEMM) {
static int mesh_phasemm_seqerr;
mesh_phasemm_seqerr++;
phase_mismatch(ms);
return;
}
printk(KERN_ERR "mesh: sequence error (err=%x exc=%x)\n",
err, exc);
} else {
printk(KERN_ERR "mesh: unknown error %x (exc=%x)\n", err, exc);
}
mesh_dump_regs(ms);
dumplog(ms, ms->conn_tgt);
if (ms->phase > selecting && (in_8(&mr->bus_status1) & BS1_BSY)) {
/* try to do what the target wants */
do_abort(ms);
phase_mismatch(ms);
return;
}
ms->stat = DID_ERROR;
mesh_done(ms, 1);
}
static void handle_exception(struct mesh_state *ms)
{
int exc;
volatile struct mesh_regs __iomem *mr = ms->mesh;
exc = in_8(&mr->exception);
out_8(&mr->interrupt, INT_EXCEPTION | INT_CMDDONE);
if (exc & EXC_RESELECTED) {
static int mesh_resel_exc;
mesh_resel_exc++;
reselected(ms);
} else if (exc == EXC_ARBLOST) {
printk(KERN_DEBUG "mesh: lost arbitration\n");
ms->stat = DID_BUS_BUSY;
mesh_done(ms, 1);
} else if (exc == EXC_SELTO) {
/* selection timed out */
ms->stat = DID_BAD_TARGET;
mesh_done(ms, 1);
} else if (exc == EXC_PHASEMM) {
/* target wants to do something different:
find out what it wants and do it. */
phase_mismatch(ms);
} else {
printk(KERN_ERR "mesh: can't cope with exception %x\n", exc);
mesh_dump_regs(ms);
dumplog(ms, ms->conn_tgt);
do_abort(ms);
phase_mismatch(ms);
}
}
static void handle_msgin(struct mesh_state *ms)
{
int i, code;
struct scsi_cmnd *cmd = ms->current_req;
struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
if (ms->n_msgin == 0)
return;
code = ms->msgin[0];
if (ALLOW_DEBUG(ms->conn_tgt)) {
printk(KERN_DEBUG "got %d message bytes:", ms->n_msgin);
for (i = 0; i < ms->n_msgin; ++i)
printk(" %x", ms->msgin[i]);
printk("\n");
}
dlog(ms, "msgin msg=%.8x",
MKWORD(ms->n_msgin, code, ms->msgin[1], ms->msgin[2]));
ms->expect_reply = 0;
ms->n_msgout = 0;
if (ms->n_msgin < msgin_length(ms))
goto reject;
if (cmd)
cmd->SCp.Message = code;
switch (code) {
case COMMAND_COMPLETE:
break;
case EXTENDED_MESSAGE:
switch (ms->msgin[2]) {
case EXTENDED_MODIFY_DATA_POINTER:
ms->data_ptr += (ms->msgin[3] << 24) + ms->msgin[6]
+ (ms->msgin[4] << 16) + (ms->msgin[5] << 8);
break;
case EXTENDED_SDTR:
if (tp->sdtr_state != sdtr_sent) {
/* reply with an SDTR */
add_sdtr_msg(ms);
/* limit period to at least the target's value,
offset to no more than the target's */
if (ms->msgout[3] < ms->msgin[3])
ms->msgout[3] = ms->msgin[3];
if (ms->msgout[4] > ms->msgin[4])
ms->msgout[4] = ms->msgin[4];
set_sdtr(ms, ms->msgout[3], ms->msgout[4]);
ms->msgphase = msg_out;
} else {
set_sdtr(ms, ms->msgin[3], ms->msgin[4]);
}
break;
default:
goto reject;
}
break;
case SAVE_POINTERS:
tp->saved_ptr = ms->data_ptr;
break;
case RESTORE_POINTERS:
ms->data_ptr = tp->saved_ptr;
break;
case DISCONNECT:
ms->phase = disconnecting;
break;
case ABORT:
break;
case MESSAGE_REJECT:
if (tp->sdtr_state == sdtr_sent)
set_sdtr(ms, 0, 0);
break;
case NOP:
break;
default:
if (IDENTIFY_BASE <= code && code <= IDENTIFY_BASE + 7) {
if (cmd == NULL) {
do_abort(ms);
ms->msgphase = msg_out;
} else if (code != cmd->device->lun + IDENTIFY_BASE) {
printk(KERN_WARNING "mesh: lun mismatch "
"(%d != %d) on reselection from "
"target %d\n", code - IDENTIFY_BASE,
cmd->device->lun, ms->conn_tgt);
}
break;
}
goto reject;
}
return;
reject:
printk(KERN_WARNING "mesh: rejecting message from target %d:",
ms->conn_tgt);
for (i = 0; i < ms->n_msgin; ++i)
printk(" %x", ms->msgin[i]);
printk("\n");
ms->msgout[0] = MESSAGE_REJECT;
ms->n_msgout = 1;
ms->msgphase = msg_out;
}
/*
* Set up DMA commands for transferring data.
*/
static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
{
int i, dma_cmd, total, off, dtot;
struct scatterlist *scl;
struct dbdma_cmd *dcmds;
dma_cmd = ms->tgts[ms->conn_tgt].data_goes_out?
OUTPUT_MORE: INPUT_MORE;
dcmds = ms->dma_cmds;
dtot = 0;
if (cmd) {
int nseg;
cmd->SCp.this_residual = scsi_bufflen(cmd);
nseg = scsi_dma_map(cmd);
BUG_ON(nseg < 0);
if (nseg) {
total = 0;
off = ms->data_ptr;
scsi_for_each_sg(cmd, scl, nseg, i) {
u32 dma_addr = sg_dma_address(scl);
u32 dma_len = sg_dma_len(scl);
total += scl->length;
if (off >= dma_len) {
off -= dma_len;
continue;
}
if (dma_len > 0xffff)
panic("mesh: scatterlist element >= 64k");
st_le16(&dcmds->req_count, dma_len - off);
st_le16(&dcmds->command, dma_cmd);
st_le32(&dcmds->phy_addr, dma_addr + off);
dcmds->xfer_status = 0;
++dcmds;
dtot += dma_len - off;
off = 0;
}
}
}
if (dtot == 0) {
/* Either the target has overrun our buffer,
or the caller didn't provide a buffer. */
static char mesh_extra_buf[64];
dtot = sizeof(mesh_extra_buf);
st_le16(&dcmds->req_count, dtot);
st_le32(&dcmds->phy_addr, virt_to_phys(mesh_extra_buf));
dcmds->xfer_status = 0;
++dcmds;
}
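/*
* Patch the final list entry from {OUTPUT,INPUT}_MORE to the matching
* _LAST opcode; the dbdma opcodes are spaced so that adding
* (OUTPUT_LAST - OUTPUT_MORE) works for either direction. A DBDMA_STOP
* entry then terminates the command list.
*/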
dma_cmd += OUTPUT_LAST - OUTPUT_MORE;
st_le16(&dcmds[-1].command, dma_cmd);
memset(dcmds, 0, sizeof(*dcmds));
st_le16(&dcmds->command, DBDMA_STOP);
ms->dma_count = dtot;
}
static void halt_dma(struct mesh_state *ms)
{
volatile struct dbdma_regs __iomem *md = ms->dma;
volatile struct mesh_regs __iomem *mr = ms->mesh;
struct scsi_cmnd *cmd = ms->current_req;
int t, nb;
if (!ms->tgts[ms->conn_tgt].data_goes_out) {
/* wait a little while until the fifo drains */
t = 50;
while (t > 0 && in_8(&mr->fifo_count) != 0
&& (in_le32(&md->status) & ACTIVE) != 0) {
--t;
udelay(1);
}
}
out_le32(&md->control, RUN << 16); /* turn off RUN bit */
nb = (mr->count_hi << 8) + mr->count_lo;
dlog(ms, "halt_dma fc/count=%.6x",
MKWORD(0, mr->fifo_count, 0, nb));
if (ms->tgts[ms->conn_tgt].data_goes_out)
nb += mr->fifo_count;
/* nb is the number of bytes not yet transferred
to/from the target. */
ms->data_ptr -= nb;
dlog(ms, "data_ptr %x", ms->data_ptr);
if (ms->data_ptr < 0) {
printk(KERN_ERR "mesh: halt_dma: data_ptr=%d (nb=%d, ms=%p)\n",
ms->data_ptr, nb, ms);
ms->data_ptr = 0;
#ifdef MESH_DBG
dumplog(ms, ms->conn_tgt);
dumpslog(ms);
#endif /* MESH_DBG */
} else if (cmd && scsi_bufflen(cmd) &&
ms->data_ptr > scsi_bufflen(cmd)) {
printk(KERN_DEBUG "mesh: target %d overrun, "
"data_ptr=%x total=%x goes_out=%d\n",
ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd),
ms->tgts[ms->conn_tgt].data_goes_out);
}
scsi_dma_unmap(cmd);
ms->dma_started = 0;
}
static void phase_mismatch(struct mesh_state *ms)
{
volatile struct mesh_regs __iomem *mr = ms->mesh;
int phase;
dlog(ms, "phasemm ch/cl/seq/fc=%.8x",
MKWORD(mr->count_hi, mr->count_lo, mr->sequence, mr->fifo_count));
phase = in_8(&mr->bus_status0) & BS0_PHASE;
if (ms->msgphase == msg_out_xxx && phase == BP_MSGOUT) {
/* output the last byte of the message, without ATN */
out_8(&mr->count_lo, 1);
out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg);
mesh_flush_io(mr);
udelay(1);
out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]);
ms->msgphase = msg_out_last;
return;
}
if (ms->msgphase == msg_in) {
get_msgin(ms);
if (ms->n_msgin)
handle_msgin(ms);
}
if (ms->dma_started)
halt_dma(ms);
if (mr->fifo_count) {
out_8(&mr->sequence, SEQ_FLUSHFIFO);
mesh_flush_io(mr);
udelay(1);
}
ms->msgphase = msg_none;
switch (phase) {
case BP_DATAIN:
ms->tgts[ms->conn_tgt].data_goes_out = 0;
ms->phase = dataing;
break;
case BP_DATAOUT:
ms->tgts[ms->conn_tgt].data_goes_out = 1;
ms->phase = dataing;
break;
case BP_COMMAND:
ms->phase = commanding;
break;
case BP_STATUS:
ms->phase = statusing;
break;
case BP_MSGIN:
ms->msgphase = msg_in;
ms->n_msgin = 0;
break;
case BP_MSGOUT:
ms->msgphase = msg_out;
if (ms->n_msgout == 0) {
if (ms->aborting) {
do_abort(ms);
} else {
if (ms->last_n_msgout == 0) {
printk(KERN_DEBUG
"mesh: no msg to repeat\n");
ms->msgout[0] = NOP;
ms->last_n_msgout = 1;
}
ms->n_msgout = ms->last_n_msgout;
}
}
break;
default:
printk(KERN_DEBUG "mesh: unknown scsi phase %x\n", phase);
ms->stat = DID_ERROR;
mesh_done(ms, 1);
return;
}
start_phase(ms);
}
static void cmd_complete(struct mesh_state *ms)
{
volatile struct mesh_regs __iomem *mr = ms->mesh;
struct scsi_cmnd *cmd = ms->current_req;
struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
int seq, n, t;
dlog(ms, "cmd_complete fc=%x", mr->fifo_count);
seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0);
switch (ms->msgphase) {
case msg_out_xxx:
/* huh? we expected a phase mismatch */
ms->n_msgin = 0;
ms->msgphase = msg_in;
/* fall through */
case msg_in:
/* should have some message bytes in fifo */
get_msgin(ms);
n = msgin_length(ms);
if (ms->n_msgin < n) {
out_8(&mr->count_lo, n - ms->n_msgin);
out_8(&mr->sequence, SEQ_MSGIN + seq);
} else {
ms->msgphase = msg_none;
handle_msgin(ms);
start_phase(ms);
}
break;
case msg_in_bad:
out_8(&mr->sequence, SEQ_FLUSHFIFO);
mesh_flush_io(mr);
udelay(1);
out_8(&mr->count_lo, 1);
out_8(&mr->sequence, SEQ_MSGIN + SEQ_ATN + use_active_neg);
break;
case msg_out:
/*
* To get the right timing on ATN wrt ACK, we have
* to get the MESH to drop ACK, wait until REQ gets
* asserted, then drop ATN. To do this we first
* issue a SEQ_MSGOUT with ATN and wait for REQ,
* then change the command to a SEQ_MSGOUT w/o ATN.
* If we don't see REQ in a reasonable time, we
* change the command to SEQ_MSGIN with ATN,
* wait for the phase mismatch interrupt, then
* issue the SEQ_MSGOUT without ATN.
*/
out_8(&mr->count_lo, 1);
out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg + SEQ_ATN);
t = 30; /* wait up to 30us */
while ((in_8(&mr->bus_status0) & BS0_REQ) == 0 && --t >= 0)
udelay(1);
dlog(ms, "last_mbyte err/exc/fc/cl=%.8x",
MKWORD(mr->error, mr->exception,
mr->fifo_count, mr->count_lo));
if (in_8(&mr->interrupt) & (INT_ERROR | INT_EXCEPTION)) {
/* whoops, target didn't do what we expected */
ms->last_n_msgout = ms->n_msgout;
ms->n_msgout = 0;
if (in_8(&mr->interrupt) & INT_ERROR) {
printk(KERN_ERR "mesh: error %x in msg_out\n",
in_8(&mr->error));
handle_error(ms);
return;
}
if (in_8(&mr->exception) != EXC_PHASEMM)
printk(KERN_ERR "mesh: exc %x in msg_out\n",
in_8(&mr->exception));
else
printk(KERN_DEBUG "mesh: bs0=%x in msg_out\n",
in_8(&mr->bus_status0));
handle_exception(ms);
return;
}
if (in_8(&mr->bus_status0) & BS0_REQ) {
out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg);
mesh_flush_io(mr);
udelay(1);
out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]);
ms->msgphase = msg_out_last;
} else {
out_8(&mr->sequence, SEQ_MSGIN + use_active_neg + SEQ_ATN);
ms->msgphase = msg_out_xxx;
}
break;
case msg_out_last:
ms->last_n_msgout = ms->n_msgout;
ms->n_msgout = 0;
ms->msgphase = ms->expect_reply? msg_in: msg_none;
start_phase(ms);
break;
case msg_none:
switch (ms->phase) {
case idle:
printk(KERN_ERR "mesh: interrupt in idle phase?\n");
dumpslog(ms);
return;
case selecting:
dlog(ms, "Selecting phase at command completion",0);
ms->msgout[0] = IDENTIFY(ALLOW_RESEL(ms->conn_tgt),
(cmd? cmd->device->lun: 0));
ms->n_msgout = 1;
ms->expect_reply = 0;
if (ms->aborting) {
ms->msgout[0] = ABORT;
ms->n_msgout++;
} else if (tp->sdtr_state == do_sdtr) {
/* add SDTR message */
add_sdtr_msg(ms);
ms->expect_reply = 1;
tp->sdtr_state = sdtr_sent;
}
ms->msgphase = msg_out;
/*
* We need to wait for REQ before dropping ATN.
* We wait for at most 230us, then fall back to
* a scheme where we issue a SEQ_COMMAND with ATN,
* which will give us a phase mismatch interrupt
* when REQ does come, and then we send the message.
*/
t = 230; /* wait up to 230us */
while ((in_8(&mr->bus_status0) & BS0_REQ) == 0) {
if (--t < 0) {
dlog(ms, "impatient for req", ms->n_msgout);
ms->msgphase = msg_none;
break;
}
udelay(1);
}
break;
case dataing:
if (ms->dma_count != 0) {
start_phase(ms);
return;
}
/*
* We can get a phase mismatch here if the target
* changes to the status phase, even though we have
* had a command complete interrupt. Then, if we
* issue the SEQ_STATUS command, we'll get a sequence
* error interrupt. Which isn't so bad except that
* occasionally the mesh actually executes the
* SEQ_STATUS *as well as* giving us the sequence
* error and phase mismatch exception.
*/
out_8(&mr->sequence, 0);
out_8(&mr->interrupt,
INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
halt_dma(ms);
break;
case statusing:
if (cmd) {
cmd->SCp.Status = mr->fifo;
if (DEBUG_TARGET(cmd))
printk(KERN_DEBUG "mesh: status is %x\n",
cmd->SCp.Status);
}
ms->msgphase = msg_in;
break;
case busfreeing:
mesh_done(ms, 1);
return;
case disconnecting:
ms->current_req = NULL;
ms->phase = idle;
mesh_start(ms);
return;
default:
break;
}
++ms->phase;
start_phase(ms);
break;
}
}
/*
* Called by midlayer with host locked to queue a new
* request
*/
static int mesh_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
struct mesh_state *ms;
cmd->scsi_done = done;
cmd->host_scribble = NULL;
ms = (struct mesh_state *) cmd->device->host->hostdata;
if (ms->request_q == NULL)
ms->request_q = cmd;
else
ms->request_qtail->host_scribble = (void *) cmd;
ms->request_qtail = cmd;
if (ms->phase == idle)
mesh_start(ms);
return 0;
}
static DEF_SCSI_QCMD(mesh_queue)
/*
* Called to handle interrupts, either from the interrupt
* handler (do_mesh_interrupt) or from other functions in
* exceptional circumstances.
*/
static void mesh_interrupt(struct mesh_state *ms)
{
volatile struct mesh_regs __iomem *mr = ms->mesh;
int intr;
#if 0
if (ALLOW_DEBUG(ms->conn_tgt))
printk(KERN_DEBUG "mesh_intr, bs0=%x int=%x exc=%x err=%x "
"phase=%d msgphase=%d\n", mr->bus_status0,
mr->interrupt, mr->exception, mr->error,
ms->phase, ms->msgphase);
#endif
while ((intr = in_8(&mr->interrupt)) != 0) {
dlog(ms, "interrupt intr/err/exc/seq=%.8x",
MKWORD(intr, mr->error, mr->exception, mr->sequence));
if (intr & INT_ERROR) {
handle_error(ms);
} else if (intr & INT_EXCEPTION) {
handle_exception(ms);
} else if (intr & INT_CMDDONE) {
out_8(&mr->interrupt, INT_CMDDONE);
cmd_complete(ms);
}
}
}
/* TODO: here we could at least try to remove the command from the
* queue if it isn't connected yet, and for a pending command, assert
* ATN until the bus gets freed.
*/
static int mesh_abort(struct scsi_cmnd *cmd)
{
struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata;
printk(KERN_DEBUG "mesh_abort(%p)\n", cmd);
mesh_dump_regs(ms);
dumplog(ms, cmd->device->id);
dumpslog(ms);
return FAILED;
}
/*
* Called by the midlayer with the lock held to reset the
* SCSI host and bus.
* The midlayer will wait for devices to come back; we don't need
* to do that ourselves.
*/
static int mesh_host_reset(struct scsi_cmnd *cmd)
{
struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata;
volatile struct mesh_regs __iomem *mr = ms->mesh;
volatile struct dbdma_regs __iomem *md = ms->dma;
unsigned long flags;
printk(KERN_DEBUG "mesh_host_reset\n");
spin_lock_irqsave(ms->host->host_lock, flags);
/* Reset the controller & dbdma channel */
out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */
out_8(&mr->exception, 0xff); /* clear all exception bits */
out_8(&mr->error, 0xff); /* clear all error bits */
out_8(&mr->sequence, SEQ_RESETMESH);
mesh_flush_io(mr);
udelay(1);
out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
out_8(&mr->source_id, ms->host->this_id);
out_8(&mr->sel_timeout, 25); /* 250ms */
out_8(&mr->sync_params, ASYNC_PARAMS);
/* Reset the bus */
out_8(&mr->bus_status1, BS1_RST); /* assert RST */
mesh_flush_io(mr);
udelay(30); /* leave it on for >= 25us */
out_8(&mr->bus_status1, 0); /* negate RST */
/* Complete pending commands */
handle_reset(ms);
spin_unlock_irqrestore(ms->host->host_lock, flags);
return SUCCESS;
}
static void set_mesh_power(struct mesh_state *ms, int state)
{
if (!machine_is(powermac))
return;
if (state) {
pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 1);
msleep(200);
} else {
pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 0);
msleep(10);
}
}
#ifdef CONFIG_PM
static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
{
struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
unsigned long flags;
switch (mesg.event) {
case PM_EVENT_SUSPEND:
case PM_EVENT_HIBERNATE:
case PM_EVENT_FREEZE:
break;
default:
return 0;
}
if (ms->phase == sleeping)
return 0;
scsi_block_requests(ms->host);
spin_lock_irqsave(ms->host->host_lock, flags);
while (ms->phase != idle) {
spin_unlock_irqrestore(ms->host->host_lock, flags);
msleep(10);
spin_lock_irqsave(ms->host->host_lock, flags);
}
ms->phase = sleeping;
spin_unlock_irqrestore(ms->host->host_lock, flags);
disable_irq(ms->meshintr);
set_mesh_power(ms, 0);
return 0;
}
static int mesh_resume(struct macio_dev *mdev)
{
struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
unsigned long flags;
if (ms->phase != sleeping)
return 0;
set_mesh_power(ms, 1);
mesh_init(ms);
spin_lock_irqsave(ms->host->host_lock, flags);
mesh_start(ms);
spin_unlock_irqrestore(ms->host->host_lock, flags);
enable_irq(ms->meshintr);
scsi_unblock_requests(ms->host);
return 0;
}
#endif /* CONFIG_PM */
/*
* If we leave drives set for synchronous transfers (especially
* CDROMs), and reboot to MacOS, it gets confused, poor thing.
* So, on reboot we reset the SCSI bus.
*/
static int mesh_shutdown(struct macio_dev *mdev)
{
struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
volatile struct mesh_regs __iomem *mr;
unsigned long flags;
printk(KERN_INFO "resetting MESH scsi bus(es)\n");
spin_lock_irqsave(ms->host->host_lock, flags);
mr = ms->mesh;
out_8(&mr->intr_mask, 0);
out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
out_8(&mr->bus_status1, BS1_RST);
mesh_flush_io(mr);
udelay(30);
out_8(&mr->bus_status1, 0);
spin_unlock_irqrestore(ms->host->host_lock, flags);
return 0;
}
static struct scsi_host_template mesh_template = {
.proc_name = "mesh",
.name = "MESH",
.queuecommand = mesh_queue,
.eh_abort_handler = mesh_abort,
.eh_host_reset_handler = mesh_host_reset,
.can_queue = 20,
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.use_clustering = DISABLE_CLUSTERING,
};
static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
struct device_node *mesh = macio_get_of_node(mdev);
struct pci_dev* pdev = macio_get_pci_dev(mdev);
int tgt, minper;
const int *cfp;
struct mesh_state *ms;
struct Scsi_Host *mesh_host;
void *dma_cmd_space;
dma_addr_t dma_cmd_bus;
switch (mdev->bus->chip->type) {
case macio_heathrow:
case macio_gatwick:
case macio_paddington:
use_active_neg = 0;
break;
default:
use_active_neg = SEQ_ACTIVE_NEG;
}
if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) {
printk(KERN_ERR "mesh: expected 2 addrs and 2 intrs"
" (got %d,%d)\n", macio_resource_count(mdev),
macio_irq_count(mdev));
return -ENODEV;
}
if (macio_request_resources(mdev, "mesh") != 0) {
printk(KERN_ERR "mesh: unable to request memory resources");
return -EBUSY;
}
mesh_host = scsi_host_alloc(&mesh_template, sizeof(struct mesh_state));
if (mesh_host == NULL) {
printk(KERN_ERR "mesh: couldn't register host");
goto out_release;
}
/* Old junk for root discovery, that will die ultimately */
#if !defined(MODULE)
note_scsi_host(mesh, mesh_host);
#endif
mesh_host->base = macio_resource_start(mdev, 0);
mesh_host->irq = macio_irq(mdev, 0);
ms = (struct mesh_state *) mesh_host->hostdata;
macio_set_drvdata(mdev, ms);
ms->host = mesh_host;
ms->mdev = mdev;
ms->pdev = pdev;
ms->mesh = ioremap(macio_resource_start(mdev, 0), 0x1000);
if (ms->mesh == NULL) {
printk(KERN_ERR "mesh: can't map registers\n");
goto out_free;
}
ms->dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
if (ms->dma == NULL) {
printk(KERN_ERR "mesh: can't map registers\n");
iounmap(ms->mesh);
goto out_free;
}
ms->meshintr = macio_irq(mdev, 0);
ms->dmaintr = macio_irq(mdev, 1);
/* Space for dma command list: +1 for stop command,
* +1 to allow for aligning.
*/
ms->dma_cmd_size = (mesh_host->sg_tablesize + 2) * sizeof(struct dbdma_cmd);
/* We use the PCI APIs for now until the generic one gets fixed
* enough or until we get some macio-specific versions
*/
dma_cmd_space = pci_alloc_consistent(macio_get_pci_dev(mdev),
ms->dma_cmd_size,
&dma_cmd_bus);
if (dma_cmd_space == NULL) {
printk(KERN_ERR "mesh: can't allocate DMA table\n");
goto out_unmap;
}
memset(dma_cmd_space, 0, ms->dma_cmd_size);
ms->dma_cmds = (struct dbdma_cmd *) DBDMA_ALIGN(dma_cmd_space);
ms->dma_cmd_space = dma_cmd_space;
ms->dma_cmd_bus = dma_cmd_bus + ((unsigned long)ms->dma_cmds)
- (unsigned long)dma_cmd_space;
ms->current_req = NULL;
for (tgt = 0; tgt < 8; ++tgt) {
ms->tgts[tgt].sdtr_state = do_sdtr;
ms->tgts[tgt].sync_params = ASYNC_PARAMS;
ms->tgts[tgt].current_req = NULL;
}
if ((cfp = of_get_property(mesh, "clock-frequency", NULL)))
ms->clk_freq = *cfp;
else {
printk(KERN_INFO "mesh: assuming 50MHz clock frequency\n");
ms->clk_freq = 50000000;
}
/* The maximum sync rate is clock / 5; increase
* mesh_sync_period if necessary.
*/
minper = 1000000000 / (ms->clk_freq / 5); /* ns */
if (mesh_sync_period < minper)
mesh_sync_period = minper;
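/* E.g. a 50MHz clock gives minper = 1000000000/(50000000/5) = 100ns,
* i.e. a 10MB/s ceiling on the negotiated synchronous rate. */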
/* Power up the chip */
set_mesh_power(ms, 1);
/* Set it up */
mesh_init(ms);
/* Request interrupt */
if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) {
printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr);
goto out_shutdown;
}
/* Add scsi host & scan */
if (scsi_add_host(mesh_host, &mdev->ofdev.dev))
goto out_release_irq;
scsi_scan_host(mesh_host);
return 0;
out_release_irq:
free_irq(ms->meshintr, ms);
out_shutdown:
/* shutdown & reset bus in case of error or macos can be confused
* at reboot if the bus was set to synchronous mode already
*/
mesh_shutdown(mdev);
set_mesh_power(ms, 0);
pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
ms->dma_cmd_space, ms->dma_cmd_bus);
out_unmap:
iounmap(ms->dma);
iounmap(ms->mesh);
out_free:
scsi_host_put(mesh_host);
out_release:
macio_release_resources(mdev);
return -ENODEV;
}
static int mesh_remove(struct macio_dev *mdev)
{
struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
struct Scsi_Host *mesh_host = ms->host;
scsi_remove_host(mesh_host);
free_irq(ms->meshintr, ms);
/* Reset scsi bus */
mesh_shutdown(mdev);
/* Shut down chip & termination */
set_mesh_power(ms, 0);
/* Unmap registers & dma controller */
iounmap(ms->mesh);
iounmap(ms->dma);
/* Free DMA commands memory */
pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
ms->dma_cmd_space, ms->dma_cmd_bus);
/* Release memory resources */
macio_release_resources(mdev);
scsi_host_put(mesh_host);
return 0;
}
static struct of_device_id mesh_match[] =
{
{
.name = "mesh",
},
{
.type = "scsi",
.compatible = "chrp,mesh0"
},
{},
};
MODULE_DEVICE_TABLE (of, mesh_match);
static struct macio_driver mesh_driver =
{
.driver = {
.name = "mesh",
.owner = THIS_MODULE,
.of_match_table = mesh_match,
},
.probe = mesh_probe,
.remove = mesh_remove,
.shutdown = mesh_shutdown,
#ifdef CONFIG_PM
.suspend = mesh_suspend,
.resume = mesh_resume,
#endif
};
static int __init init_mesh(void)
{
/* Calculate sync rate from module parameters */
if (sync_rate > 10)
sync_rate = 10;
if (sync_rate > 0) {
printk(KERN_INFO "mesh: configured for synchronous %d MB/s\n", sync_rate);
mesh_sync_period = 1000 / sync_rate; /* ns */
mesh_sync_offset = 15;
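/* e.g. sync_rate=5 yields mesh_sync_period = 200ns, so the
* driver will offer 5 MB/s transfers with an offset of 15 */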
} else
printk(KERN_INFO "mesh: configured for asynchronous\n");
return macio_register_driver(&mesh_driver);
}
static void __exit exit_mesh(void)
{
return macio_unregister_driver(&mesh_driver);
}
module_init(init_mesh);
module_exit(exit_mesh);
| gpl-2.0 |
edgestorm/lu3700-cm-gin | fs/minix/itree_v2.c | 9091 | 1925 | #include <linux/buffer_head.h>
#include "minix.h"
enum {DIRECT = 7, DEPTH = 4}; /* Have triple indirect */
typedef u32 block_t; /* 32 bit, host order */
static inline unsigned long block_to_cpu(block_t n)
{
return n;
}
static inline block_t cpu_to_block(unsigned long n)
{
return n;
}
static inline block_t *i_data(struct inode *inode)
{
return (block_t *)minix_i(inode)->u.i2_data;
}
#define DIRCOUNT 7
#define INDIRCOUNT(sb) (1 << ((sb)->s_blocksize_bits - 2))
static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
{
int n = 0;
char b[BDEVNAME_SIZE];
struct super_block *sb = inode->i_sb;
if (block < 0) {
printk("MINIX-fs: block_to_path: block %ld < 0 on dev %s\n",
block, bdevname(sb->s_bdev, b));
} else if (block >= (minix_sb(inode->i_sb)->s_max_size/sb->s_blocksize)) {
if (printk_ratelimit())
printk("MINIX-fs: block_to_path: "
"block %ld too big on dev %s\n",
block, bdevname(sb->s_bdev, b));
} else if (block < DIRCOUNT) {
offsets[n++] = block;
} else if ((block -= DIRCOUNT) < INDIRCOUNT(sb)) {
offsets[n++] = DIRCOUNT;
offsets[n++] = block;
} else if ((block -= INDIRCOUNT(sb)) < INDIRCOUNT(sb) * INDIRCOUNT(sb)) {
offsets[n++] = DIRCOUNT + 1;
offsets[n++] = block / INDIRCOUNT(sb);
offsets[n++] = block % INDIRCOUNT(sb);
} else {
block -= INDIRCOUNT(sb) * INDIRCOUNT(sb);
offsets[n++] = DIRCOUNT + 2;
offsets[n++] = (block / INDIRCOUNT(sb)) / INDIRCOUNT(sb);
offsets[n++] = (block / INDIRCOUNT(sb)) % INDIRCOUNT(sb);
offsets[n++] = block % INDIRCOUNT(sb);
}
return n;
}
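/*
* Worked example (assuming a 1024-byte block size, so INDIRCOUNT(sb)
* is 256): block 300 is neither direct (>= 7) nor singly indirect
* (300 - 7 = 293 >= 256), so it maps into the double-indirect tree
* as offsets = { 8, 37/256, 37%256 } = { 8, 0, 37 } with n = 3.
*/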
#include "itree_common.c"
int V2_minix_get_block(struct inode * inode, long block,
struct buffer_head *bh_result, int create)
{
return get_block(inode, block, bh_result, create);
}
void V2_minix_truncate(struct inode * inode)
{
truncate(inode);
}
unsigned V2_minix_blocks(loff_t size, struct super_block *sb)
{
return nblocks(size, sb);
}
| gpl-2.0 |
antaril/AGK_for_DU | drivers/usb/serial/ark3116.c | 388 | 23492 | /*
* Copyright (C) 2009 by Bart Hartgers (bart.hartgers+ark3116@gmail.com)
* Original version:
* Copyright (C) 2006
* Simon Schulz (ark3116_driver <at> auctionant.de)
*
* ark3116
* - implements a driver for the arkmicro ark3116 chipset (vendor=0x6547,
* productid=0x0232) (used in a datacable called KQ-U8A)
*
* Supports full modem status lines, break, hardware flow control. Does not
* support software flow control, since I do not know how to enable it in hw.
*
* This driver is an essentially new implementation. I initially dug
* into the old ark3116.c driver and suddenly realized the ark3116 is
* a 16450 with a USB interface glued to it. See comments at the
* bottom of this file.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
static bool debug;
/*
* Version information
*/
#define DRIVER_VERSION "v0.7"
#define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>"
#define DRIVER_DESC "USB ARK3116 serial/IrDA driver"
#define DRIVER_DEV_DESC "ARK3116 RS232/IrDA"
#define DRIVER_NAME "ark3116"
/* usb timeout of 1 second */
#define ARK_TIMEOUT 1000
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x6547, 0x0232) },
{ USB_DEVICE(0x18ec, 0x3118) }, /* USB to IrDA adapter */
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
static int is_irda(struct usb_serial *serial)
{
struct usb_device *dev = serial->dev;
if (le16_to_cpu(dev->descriptor.idVendor) == 0x18ec &&
le16_to_cpu(dev->descriptor.idProduct) == 0x3118)
return 1;
return 0;
}
struct ark3116_private {
wait_queue_head_t delta_msr_wait;
struct async_icount icount;
int irda; /* 1 for irda device */
/* protects hw register updates */
struct mutex hw_lock;
int quot; /* baudrate divisor */
__u32 lcr; /* line control register value */
__u32 hcr; /* handshake control register (0x8)
* value */
__u32 mcr; /* modem control register value */
/* protects the status values below */
spinlock_t status_lock;
__u32 msr; /* modem status register value */
__u32 lsr; /* line status register value */
};
static int ark3116_write_reg(struct usb_serial *serial,
unsigned reg, __u8 val)
{
int result;
/* 0xfe 0x40 are magic values taken from original driver */
result = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
0xfe, 0x40, val, reg,
NULL, 0, ARK_TIMEOUT);
return result;
}
static int ark3116_read_reg(struct usb_serial *serial,
unsigned reg, unsigned char *buf)
{
int result;
/* 0xfe 0xc0 are magic values taken from original driver */
result = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
0xfe, 0xc0, 0, reg,
buf, 1, ARK_TIMEOUT);
if (result < 0)
return result;
else
return buf[0];
}
static inline int calc_divisor(int bps)
{
/* Original ark3116 made some exceptions in rounding here
* because windows did the same. Assume that is not really
* necessary.
* Crystal is 12MHz, probably because of USB, but we divide by 4?
*/
return (12000000 + 2*bps) / (4*bps);
}
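/*
* E.g. calc_divisor(9600) = (12000000 + 19200)/38400 = 313; the
* 2*bps term rounds the exact quotient 312.5 to the nearest integer.
*/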
static int ark3116_attach(struct usb_serial *serial)
{
struct usb_serial_port *port = serial->port[0];
struct ark3116_private *priv;
/* make sure we have our end-points */
if ((serial->num_bulk_in == 0) ||
(serial->num_bulk_out == 0) ||
(serial->num_interrupt_in == 0)) {
dev_err(&serial->dev->dev,
"%s - missing endpoint - "
"bulk in: %d, bulk out: %d, int in %d\n",
KBUILD_MODNAME,
serial->num_bulk_in,
serial->num_bulk_out,
serial->num_interrupt_in);
return -EINVAL;
}
priv = kzalloc(sizeof(struct ark3116_private),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
init_waitqueue_head(&priv->delta_msr_wait);
mutex_init(&priv->hw_lock);
spin_lock_init(&priv->status_lock);
priv->irda = is_irda(serial);
usb_set_serial_port_data(port, priv);
/* setup the hardware */
ark3116_write_reg(serial, UART_IER, 0);
/* disable DMA */
ark3116_write_reg(serial, UART_FCR, 0);
/* handshake control */
priv->hcr = 0;
ark3116_write_reg(serial, 0x8 , 0);
/* modem control */
priv->mcr = 0;
ark3116_write_reg(serial, UART_MCR, 0);
if (!(priv->irda)) {
ark3116_write_reg(serial, 0xb , 0);
} else {
ark3116_write_reg(serial, 0xb , 1);
ark3116_write_reg(serial, 0xc , 0);
ark3116_write_reg(serial, 0xd , 0x41);
ark3116_write_reg(serial, 0xa , 1);
}
/* setup baudrate */
ark3116_write_reg(serial, UART_LCR, UART_LCR_DLAB);
/* setup for 9600 8N1 */
priv->quot = calc_divisor(9600);
ark3116_write_reg(serial, UART_DLL, priv->quot & 0xff);
ark3116_write_reg(serial, UART_DLM, (priv->quot>>8) & 0xff);
priv->lcr = UART_LCR_WLEN8;
ark3116_write_reg(serial, UART_LCR, UART_LCR_WLEN8);
ark3116_write_reg(serial, 0xe, 0);
if (priv->irda)
ark3116_write_reg(serial, 0x9, 0);
dev_info(&serial->dev->dev,
"%s using %s mode\n",
KBUILD_MODNAME,
priv->irda ? "IrDA" : "RS232");
return 0;
}
static void ark3116_release(struct usb_serial *serial)
{
struct usb_serial_port *port = serial->port[0];
struct ark3116_private *priv = usb_get_serial_port_data(port);
/* device is closed, so URBs and DMA should be down */
usb_set_serial_port_data(port, NULL);
mutex_destroy(&priv->hw_lock);
kfree(priv);
}
static void ark3116_init_termios(struct tty_struct *tty)
{
struct ktermios *termios = tty->termios;
*termios = tty_std_termios;
termios->c_cflag = B9600 | CS8
| CREAD | HUPCL | CLOCAL;
termios->c_ispeed = 9600;
termios->c_ospeed = 9600;
}
static void ark3116_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct ark3116_private *priv = usb_get_serial_port_data(port);
struct ktermios *termios = tty->termios;
unsigned int cflag = termios->c_cflag;
int bps = tty_get_baud_rate(tty);
int quot;
__u8 lcr, hcr, eval;
/* set data bit count */
switch (cflag & CSIZE) {
case CS5:
lcr = UART_LCR_WLEN5;
break;
case CS6:
lcr = UART_LCR_WLEN6;
break;
case CS7:
lcr = UART_LCR_WLEN7;
break;
default:
case CS8:
lcr = UART_LCR_WLEN8;
break;
}
if (cflag & CSTOPB)
lcr |= UART_LCR_STOP;
if (cflag & PARENB)
lcr |= UART_LCR_PARITY;
if (!(cflag & PARODD))
lcr |= UART_LCR_EPAR;
#ifdef CMSPAR
if (cflag & CMSPAR)
lcr |= UART_LCR_SPAR;
#endif
/* handshake control */
hcr = (cflag & CRTSCTS) ? 0x03 : 0x00;
/* calc baudrate */
dbg("%s - setting bps to %d", __func__, bps);
eval = 0;
switch (bps) {
case 0:
quot = calc_divisor(9600);
break;
default:
if ((bps < 75) || (bps > 3000000))
bps = 9600;
quot = calc_divisor(bps);
break;
case 460800:
eval = 1;
quot = calc_divisor(bps);
break;
case 921600:
eval = 2;
quot = calc_divisor(bps);
break;
}
/* Update state: synchronize */
mutex_lock(&priv->hw_lock);
/* keep old LCR_SBC bit */
lcr |= (priv->lcr & UART_LCR_SBC);
dbg("%s - setting hcr:0x%02x,lcr:0x%02x,quot:%d",
__func__, hcr, lcr, quot);
/* handshake control */
if (priv->hcr != hcr) {
priv->hcr = hcr;
ark3116_write_reg(serial, 0x8, hcr);
}
/* baudrate */
if (priv->quot != quot) {
priv->quot = quot;
priv->lcr = lcr; /* need to write lcr anyway */
/* disable DMA since transmit/receive is
* shadowed by UART_DLL
*/
ark3116_write_reg(serial, UART_FCR, 0);
ark3116_write_reg(serial, UART_LCR,
lcr|UART_LCR_DLAB);
ark3116_write_reg(serial, UART_DLL, quot & 0xff);
ark3116_write_reg(serial, UART_DLM, (quot>>8) & 0xff);
/* restore lcr */
ark3116_write_reg(serial, UART_LCR, lcr);
/* magic baudrate thingy: not sure what it does,
* but windows does this as well.
*/
ark3116_write_reg(serial, 0xe, eval);
/* enable DMA */
ark3116_write_reg(serial, UART_FCR, UART_FCR_DMA_SELECT);
} else if (priv->lcr != lcr) {
priv->lcr = lcr;
ark3116_write_reg(serial, UART_LCR, lcr);
}
mutex_unlock(&priv->hw_lock);
/* check for software flow control */
if (I_IXOFF(tty) || I_IXON(tty)) {
dev_warn(&serial->dev->dev,
"%s: don't know how to do software flow control\n",
KBUILD_MODNAME);
}
/* Don't rewrite B0 */
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, bps, bps);
}
static void ark3116_close(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
if (serial->dev) {
/* disable DMA */
ark3116_write_reg(serial, UART_FCR, 0);
/* deactivate interrupts */
ark3116_write_reg(serial, UART_IER, 0);
usb_serial_generic_close(port);
if (serial->num_interrupt_in)
usb_kill_urb(port->interrupt_in_urb);
}
}
static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct ark3116_private *priv = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
unsigned char *buf;
int result;
buf = kmalloc(1, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
result = usb_serial_generic_open(tty, port);
if (result) {
dbg("%s - usb_serial_generic_open failed: %d",
__func__, result);
goto err_out;
}
/* remove any data still left: also clears error state */
ark3116_read_reg(serial, UART_RX, buf);
/* read modem status */
priv->msr = ark3116_read_reg(serial, UART_MSR, buf);
/* read line status */
priv->lsr = ark3116_read_reg(serial, UART_LSR, buf);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "submit irq_in urb failed %d\n",
result);
ark3116_close(port);
goto err_out;
}
/* activate interrupts */
ark3116_write_reg(port->serial, UART_IER, UART_IER_MSI|UART_IER_RLSI);
/* enable DMA */
ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT);
/* setup termios */
if (tty)
ark3116_set_termios(tty, port, NULL);
err_out:
kfree(buf);
return result;
}
static int ark3116_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
{
struct usb_serial_port *port = tty->driver_data;
struct ark3116_private *priv = usb_get_serial_port_data(port);
struct async_icount cnow = priv->icount;
icount->cts = cnow.cts;
icount->dsr = cnow.dsr;
icount->rng = cnow.rng;
icount->dcd = cnow.dcd;
icount->rx = cnow.rx;
icount->tx = cnow.tx;
icount->frame = cnow.frame;
icount->overrun = cnow.overrun;
icount->parity = cnow.parity;
icount->brk = cnow.brk;
icount->buf_overrun = cnow.buf_overrun;
return 0;
}
static int ark3116_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
struct ark3116_private *priv = usb_get_serial_port_data(port);
struct serial_struct serstruct;
void __user *user_arg = (void __user *)arg;
switch (cmd) {
case TIOCGSERIAL:
/* XXX: Some of these values are probably wrong. */
memset(&serstruct, 0, sizeof(serstruct));
serstruct.type = PORT_16654;
serstruct.line = port->serial->minor;
serstruct.port = port->number;
serstruct.custom_divisor = 0;
serstruct.baud_base = 460800;
if (copy_to_user(user_arg, &serstruct, sizeof(serstruct)))
return -EFAULT;
return 0;
case TIOCSSERIAL:
if (copy_from_user(&serstruct, user_arg, sizeof(serstruct)))
return -EFAULT;
return 0;
case TIOCMIWAIT:
for (;;) {
struct async_icount prev = priv->icount;
interruptible_sleep_on(&priv->delta_msr_wait);
/* see if a signal did it */
if (signal_pending(current))
return -ERESTARTSYS;
if ((prev.rng == priv->icount.rng) &&
(prev.dsr == priv->icount.dsr) &&
(prev.dcd == priv->icount.dcd) &&
(prev.cts == priv->icount.cts))
return -EIO;
if ((arg & TIOCM_RNG &&
(prev.rng != priv->icount.rng)) ||
(arg & TIOCM_DSR &&
(prev.dsr != priv->icount.dsr)) ||
(arg & TIOCM_CD &&
(prev.dcd != priv->icount.dcd)) ||
(arg & TIOCM_CTS &&
(prev.cts != priv->icount.cts)))
return 0;
}
break;
}
return -ENOIOCTLCMD;
}
static int ark3116_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct ark3116_private *priv = usb_get_serial_port_data(port);
__u32 status;
__u32 ctrl;
unsigned long flags;
mutex_lock(&priv->hw_lock);
ctrl = priv->mcr;
mutex_unlock(&priv->hw_lock);
spin_lock_irqsave(&priv->status_lock, flags);
status = priv->msr;
spin_unlock_irqrestore(&priv->status_lock, flags);
return (status & UART_MSR_DSR ? TIOCM_DSR : 0) |
(status & UART_MSR_CTS ? TIOCM_CTS : 0) |
(status & UART_MSR_RI ? TIOCM_RI : 0) |
(status & UART_MSR_DCD ? TIOCM_CD : 0) |
(ctrl & UART_MCR_DTR ? TIOCM_DTR : 0) |
(ctrl & UART_MCR_RTS ? TIOCM_RTS : 0) |
(ctrl & UART_MCR_OUT1 ? TIOCM_OUT1 : 0) |
(ctrl & UART_MCR_OUT2 ? TIOCM_OUT2 : 0);
}
static int ark3116_tiocmset(struct tty_struct *tty,
unsigned set, unsigned clr)
{
struct usb_serial_port *port = tty->driver_data;
struct ark3116_private *priv = usb_get_serial_port_data(port);
/* we need to take the mutex here, to make sure that the value
* in priv->mcr is actually the one that is in the hardware
*/
mutex_lock(&priv->hw_lock);
if (set & TIOCM_RTS)
priv->mcr |= UART_MCR_RTS;
if (set & TIOCM_DTR)
priv->mcr |= UART_MCR_DTR;
if (set & TIOCM_OUT1)
priv->mcr |= UART_MCR_OUT1;
if (set & TIOCM_OUT2)
priv->mcr |= UART_MCR_OUT2;
if (clr & TIOCM_RTS)
priv->mcr &= ~UART_MCR_RTS;
if (clr & TIOCM_DTR)
priv->mcr &= ~UART_MCR_DTR;
if (clr & TIOCM_OUT1)
priv->mcr &= ~UART_MCR_OUT1;
if (clr & TIOCM_OUT2)
priv->mcr &= ~UART_MCR_OUT2;
ark3116_write_reg(port->serial, UART_MCR, priv->mcr);
mutex_unlock(&priv->hw_lock);
return 0;
}
static void ark3116_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct ark3116_private *priv = usb_get_serial_port_data(port);
/* LCR is also used for other things: protect access */
mutex_lock(&priv->hw_lock);
if (break_state)
priv->lcr |= UART_LCR_SBC;
else
priv->lcr &= ~UART_LCR_SBC;
ark3116_write_reg(port->serial, UART_LCR, priv->lcr);
mutex_unlock(&priv->hw_lock);
}
static void ark3116_update_msr(struct usb_serial_port *port, __u8 msr)
{
struct ark3116_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
spin_lock_irqsave(&priv->status_lock, flags);
priv->msr = msr;
spin_unlock_irqrestore(&priv->status_lock, flags);
if (msr & UART_MSR_ANY_DELTA) {
/* update input line counters */
if (msr & UART_MSR_DCTS)
priv->icount.cts++;
if (msr & UART_MSR_DDSR)
priv->icount.dsr++;
if (msr & UART_MSR_DDCD)
priv->icount.dcd++;
if (msr & UART_MSR_TERI)
priv->icount.rng++;
wake_up_interruptible(&priv->delta_msr_wait);
}
}
static void ark3116_update_lsr(struct usb_serial_port *port, __u8 lsr)
{
struct ark3116_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
spin_lock_irqsave(&priv->status_lock, flags);
/* combine bits */
priv->lsr |= lsr;
spin_unlock_irqrestore(&priv->status_lock, flags);
if (lsr&UART_LSR_BRK_ERROR_BITS) {
if (lsr & UART_LSR_BI)
priv->icount.brk++;
if (lsr & UART_LSR_FE)
priv->icount.frame++;
if (lsr & UART_LSR_PE)
priv->icount.parity++;
if (lsr & UART_LSR_OE)
priv->icount.overrun++;
}
}
static void ark3116_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
int status = urb->status;
const __u8 *data = urb->transfer_buffer;
int result;
switch (status) {
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d",
__func__, status);
return;
default:
dbg("%s - nonzero urb status received: %d",
__func__, status);
break;
case 0: /* success */
/* discovered this by trial and error... */
if ((urb->actual_length == 4) && (data[0] == 0xe8)) {
const __u8 id = data[1] & UART_IIR_ID;
dbg("%s: iir=%02x", __func__, data[1]);
if (id == UART_IIR_MSI) {
dbg("%s: msr=%02x", __func__, data[3]);
ark3116_update_msr(port, data[3]);
break;
} else if (id == UART_IIR_RLSI) {
dbg("%s: lsr=%02x", __func__, data[2]);
ark3116_update_lsr(port, data[2]);
break;
}
}
/*
* Not sure what this data meant...
*/
usb_serial_debug_data(debug, &port->dev,
__func__,
urb->actual_length,
urb->transfer_buffer);
break;
}
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_err(&urb->dev->dev,
"%s - Error %d submitting interrupt urb\n",
__func__, result);
}
/* Data comes in via the bulk (data) URB, errors/interrupts via the int URB.
* This means that we cannot be sure which data byte has an associated error
* condition, so we report an error for all data in the next bulk read.
*
* Actually, there might even be a window between the bulk data leaving the
* ark and reading/resetting the lsr in the read_bulk_callback where an
* interrupt for the next data block could come in.
* Without some kind of ordering on the ark, we would have to report the
* error for the next block of data as well...
* For now, let's pretend this can't happen.
*/
static void ark3116_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct ark3116_private *priv = usb_get_serial_port_data(port);
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
char tty_flag = TTY_NORMAL;
unsigned long flags;
__u32 lsr;
/* update line status */
spin_lock_irqsave(&priv->status_lock, flags);
lsr = priv->lsr;
priv->lsr &= ~UART_LSR_BRK_ERROR_BITS;
spin_unlock_irqrestore(&priv->status_lock, flags);
if (!urb->actual_length)
return;
tty = tty_port_tty_get(&port->port);
if (!tty)
return;
if (lsr & UART_LSR_BRK_ERROR_BITS) {
if (lsr & UART_LSR_BI)
tty_flag = TTY_BREAK;
else if (lsr & UART_LSR_PE)
tty_flag = TTY_PARITY;
else if (lsr & UART_LSR_FE)
tty_flag = TTY_FRAME;
/* overrun is special, not associated with a char */
if (lsr & UART_LSR_OE)
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
}
tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
urb->actual_length);
tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
static struct usb_driver ark3116_driver = {
.name = "ark3116",
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table,
};
static struct usb_serial_driver ark3116_device = {
.driver = {
.owner = THIS_MODULE,
.name = "ark3116",
},
.id_table = id_table,
.num_ports = 1,
.attach = ark3116_attach,
.release = ark3116_release,
.set_termios = ark3116_set_termios,
.init_termios = ark3116_init_termios,
.ioctl = ark3116_ioctl,
.tiocmget = ark3116_tiocmget,
.tiocmset = ark3116_tiocmset,
.get_icount = ark3116_get_icount,
.open = ark3116_open,
.close = ark3116_close,
.break_ctl = ark3116_break_ctl,
.read_int_callback = ark3116_read_int_callback,
.process_read_urb = ark3116_process_read_urb,
};
static struct usb_serial_driver * const serial_drivers[] = {
&ark3116_device, NULL
};
module_usb_serial_driver(ark3116_driver, serial_drivers);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debug");
/*
* The following describes what I learned from studying the old
* ark3116.c driver, disassembling the windows driver, and some lucky
* guesses. Since I do not have any datasheet or other
* documentation, inaccuracies are almost guaranteed.
*
* Some specs for the ARK3116 can be found here:
* http://web.archive.org/web/20060318000438/
* www.arkmicro.com/en/products/view.php?id=10
* On that page, 2 GPIO pins are mentioned: I assume these are the
* OUT1 and OUT2 pins of the UART, so I added support for those
* through the MCR. Since the pins are not available on my hardware,
* I could not verify this.
* Also, it states there is "on-chip hardware flow control". I have
* discovered how to enable that. Unfortunately, I do not know how to
* enable XON/XOFF (software) flow control, which would need support
* from the chip as well to work. Because of the wording on the web
* page there is a real possibility the chip simply does not support
* software flow control.
*
* I got my ark3116 as part of a mobile phone adapter cable. On the
* PCB, the following numbered contacts are present:
*
* 1:- +5V
* 2:o DTR
* 3:i RX
* 4:i DCD
* 5:o RTS
* 6:o TX
* 7:i RI
* 8:i DSR
* 10:- 0V
* 11:i CTS
*
* On my chip, all signals seem to be 3.3V, but 5V tolerant. But that
* may be different for the one you have ;-).
*
* The windows driver limits the registers to 0-F, so I assume there
* are actually 16 present on the device.
*
* On an UART interrupt, 4 bytes of data come in on the interrupt
* endpoint. The bytes are 0xe8 IIR LSR MSR.
*
* The baudrate seems to be generated from the 12MHz crystal, using
* 4-times subsampling. So quot=12e6/(4*baud). Also see description
* of register E.
*
* Registers 0-7:
* These seem to be the same as for a regular 16450. The FCR is set
* to UART_FCR_DMA_SELECT (0x8), I guess to enable transfers between
* the UART and the USB bridge/DMA engine.
*
* Register 8:
* By trial and error, I found out that bit 0 enables hardware CTS,
* stopping TX when CTS is +5V. Bit 1 does the same for RTS, making
* RTS +5V when the 3116 cannot transfer the data to the USB bus
* (verified by disabling the reading URB). Note that as far as I can
* tell, the windows driver does NOT use this, so there might be some
* hardware bug or something.
*
* According to a patch provided here
* (http://lkml.org/lkml/2009/7/26/56), the ARK3116 can also be used
* as an IrDA dongle. Since I do not have such a thing, I could not
* investigate that aspect. However, I can speculate ;-).
*
* - IrDA encodes data differently than RS232. Most likely, one of
* the bits in registers 9..E enables the IR ENDEC (encoder/decoder).
* - Depending on the IR transceiver, the input and output need to be
* inverted, so there are probably bits for that as well.
* - IrDA is half-duplex, so there should be a bit for selecting that.
*
* This still leaves at least two registers unaccounted for. Perhaps
* the chip can do XON/XOFF or CRC in HW?
*
* Register 9:
* Set to 0x00 for IrDA, when the baudrate is initialised.
*
* Register A:
* Set to 0x01 for IrDA, at init.
*
* Register B:
* Set to 0x01 for IrDA, 0x00 for RS232, at init.
*
* Register C:
* Set to 00 for IrDA, at init.
*
* Register D:
* Set to 0x41 for IrDA, at init.
*
* Register E:
* Some kind of baudrate override. The windows driver seems to set
* this to 0x00 for normal baudrates, 0x01 for 460800, 0x02 for 921600.
* Since 460800 and 921600 cannot be obtained by dividing 3MHz by an integer,
* it could be some kind of subdivisor.
* However, it does not seem to do anything: selecting 921600 (divisor 3,
* reg E=2), still gets 1 MHz. I also checked if registers 9, C or F would
* work, but they don't.
*
* Register F: unknown
*/
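/* Illustrative sketch, not part of the original driver: with the
 * 12MHz crystal and 4-times subsampling described above, the divisor
 * for a requested baudrate would follow quot=12e6/(4*baud). The
 * helper name is an assumption, added for illustration only;
 * DIV_ROUND_CLOSEST comes from <linux/kernel.h>.
 */
static inline unsigned int ark3116_baud_to_quot(unsigned int baud)
{
/* e.g. 115200 baud -> quot 26; 9600 baud -> quot 313 */
return DIV_ROUND_CLOSEST(12000000 / 4, baud);
}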
| gpl-2.0 |
TenchiMasaki/android-tegra-nv-3.1.10-rel-15r7 | drivers/net/tsi108_eth.c | 388 | 47891 | /*******************************************************************************
Copyright(c) 2006 Tundra Semiconductor Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your option)
any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59
Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*******************************************************************************/
/* This driver is based on the driver code originally developed
* for the Intel IOC80314 (ForestLake) Gigabit Ethernet by
* scott.wood@timesys.com, Copyright (C) 2003 TimeSys Corporation
*
* Currently changes from original version are:
* - porting to Tsi108-based platform and kernel 2.6 (kong.lai@tundra.com)
* - modifications to handle two ports independently and support for
* additional PHY devices (alexandre.bounine@tundra.com)
* - Get hardware information from platform device. (tie-fei.zang@freescale.com)
*
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/rtnetlink.h>
#include <linux/timer.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/tsi108.h>
#include "tsi108_eth.h"
#define MII_READ_DELAY 10000 /* max link wait time in msec */
#define TSI108_RXRING_LEN 256
/* NOTE: The driver currently does not support receiving packets
* larger than the buffer size, so don't decrease this (unless you
* want to add such support).
*/
#define TSI108_RXBUF_SIZE 1536
#define TSI108_TXRING_LEN 256
#define TSI108_TX_INT_FREQ 64
/* Check the phy status every half a second. */
#define CHECK_PHY_INTERVAL (HZ/2)
static int tsi108_init_one(struct platform_device *pdev);
static int tsi108_ether_remove(struct platform_device *pdev);
struct tsi108_prv_data {
void __iomem *regs; /* Base of normal regs */
void __iomem *phyregs; /* Base of register bank used for PHY access */
struct net_device *dev;
struct napi_struct napi;
unsigned int phy; /* Index of PHY for this interface */
unsigned int irq_num;
unsigned int id;
unsigned int phy_type;
struct timer_list timer; /* Timer that triggers the check phy function */
unsigned int rxtail; /* Next entry in rxring to read */
unsigned int rxhead; /* Next entry in rxring to give a new buffer */
unsigned int rxfree; /* Number of free, allocated RX buffers */
unsigned int rxpending; /* Non-zero if there are still descriptors
* to be processed from a previous descriptor
* interrupt condition that has been cleared */
unsigned int txtail; /* Next TX descriptor to check status on */
unsigned int txhead; /* Next TX descriptor to use */
/* Number of free TX descriptors. This could be calculated from
* txhead and txtail if one descriptor were left unused to disambiguate
* full and empty conditions, but it's simpler to just keep track
* explicitly. */
unsigned int txfree;
unsigned int phy_ok; /* The PHY is currently powered on. */
/* PHY status (duplex is 1 for half, 2 for full,
* so that the default 0 indicates that neither has
* yet been configured). */
unsigned int link_up;
unsigned int speed;
unsigned int duplex;
tx_desc *txring;
rx_desc *rxring;
struct sk_buff *txskbs[TSI108_TXRING_LEN];
struct sk_buff *rxskbs[TSI108_RXRING_LEN];
dma_addr_t txdma, rxdma;
/* txlock nests in misclock and phy_lock */
spinlock_t txlock, misclock;
/* stats is used to hold the upper bits of each hardware counter,
* and tmpstats is used to hold the full values for returning
* to the caller of get_stats(). They must be separate in case
* an overflow interrupt occurs before the stats are consumed.
*/
struct net_device_stats stats;
struct net_device_stats tmpstats;
/* These stats are kept separate in hardware, thus require individual
* fields for handling carry. They are combined in get_stats.
*/
unsigned long rx_fcs; /* Add to rx_frame_errors */
unsigned long rx_short_fcs; /* Add to rx_frame_errors */
unsigned long rx_long_fcs; /* Add to rx_frame_errors */
unsigned long rx_underruns; /* Add to rx_length_errors */
unsigned long rx_overruns; /* Add to rx_length_errors */
unsigned long tx_coll_abort; /* Add to tx_aborted_errors/collisions */
unsigned long tx_pause_drop; /* Add to tx_aborted_errors */
unsigned long mc_hash[16];
u32 msg_enable; /* debug message level */
struct mii_if_info mii_if;
unsigned int init_media;
};
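/* Illustrative note, not in the original driver: with one descriptor
 * left unused, the free count could instead be derived as
 * free = (txtail - txhead - 1) & (TSI108_TXRING_LEN - 1)
 * (valid while the ring length stays a power of two); the driver
 * keeps txfree explicitly to avoid reserving that slot.
 */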
/* Structure for a device driver */
static struct platform_driver tsi_eth_driver = {
.probe = tsi108_init_one,
.remove = tsi108_ether_remove,
.driver = {
.name = "tsi-ethernet",
.owner = THIS_MODULE,
},
};
static void tsi108_timed_checker(unsigned long dev_ptr);
static void dump_eth_one(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
printk("Dumping %s...\n", dev->name);
printk("intstat %x intmask %x phy_ok %d"
" link %d speed %d duplex %d\n",
TSI_READ(TSI108_EC_INTSTAT),
TSI_READ(TSI108_EC_INTMASK), data->phy_ok,
data->link_up, data->speed, data->duplex);
printk("TX: head %d, tail %d, free %d, stat %x, estat %x, err %x\n",
data->txhead, data->txtail, data->txfree,
TSI_READ(TSI108_EC_TXSTAT),
TSI_READ(TSI108_EC_TXESTAT),
TSI_READ(TSI108_EC_TXERR));
printk("RX: head %d, tail %d, free %d, stat %x,"
" estat %x, err %x, pending %d\n\n",
data->rxhead, data->rxtail, data->rxfree,
TSI_READ(TSI108_EC_RXSTAT),
TSI_READ(TSI108_EC_RXESTAT),
TSI_READ(TSI108_EC_RXERR), data->rxpending);
}
/* Synchronization is needed between the thread and up/down events.
* Note that the PHY is accessed through the same registers for both
* interfaces, so this can't be made interface-specific.
*/
static DEFINE_SPINLOCK(phy_lock);
static int tsi108_read_mii(struct tsi108_prv_data *data, int reg)
{
unsigned i;
TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
(data->phy << TSI108_MAC_MII_ADDR_PHY) |
(reg << TSI108_MAC_MII_ADDR_REG));
TSI_WRITE_PHY(TSI108_MAC_MII_CMD, 0);
TSI_WRITE_PHY(TSI108_MAC_MII_CMD, TSI108_MAC_MII_CMD_READ);
for (i = 0; i < 100; i++) {
if (!(TSI_READ_PHY(TSI108_MAC_MII_IND) &
(TSI108_MAC_MII_IND_NOTVALID | TSI108_MAC_MII_IND_BUSY)))
break;
udelay(10);
}
if (i == 100)
return 0xffff;
else
return TSI_READ_PHY(TSI108_MAC_MII_DATAIN);
}
static void tsi108_write_mii(struct tsi108_prv_data *data,
int reg, u16 val)
{
unsigned i = 100;
TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
(data->phy << TSI108_MAC_MII_ADDR_PHY) |
(reg << TSI108_MAC_MII_ADDR_REG));
TSI_WRITE_PHY(TSI108_MAC_MII_DATAOUT, val);
while (i--) {
if (!(TSI_READ_PHY(TSI108_MAC_MII_IND) &
TSI108_MAC_MII_IND_BUSY))
break;
udelay(10);
}
}
static int tsi108_mdio_read(struct net_device *dev, int addr, int reg)
{
struct tsi108_prv_data *data = netdev_priv(dev);
return tsi108_read_mii(data, reg);
}
static void tsi108_mdio_write(struct net_device *dev, int addr, int reg, int val)
{
struct tsi108_prv_data *data = netdev_priv(dev);
tsi108_write_mii(data, reg, val);
}
static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
int reg, u16 val)
{
unsigned i = 1000;
TSI_WRITE(TSI108_MAC_MII_ADDR,
(0x1e << TSI108_MAC_MII_ADDR_PHY)
| (reg << TSI108_MAC_MII_ADDR_REG));
TSI_WRITE(TSI108_MAC_MII_DATAOUT, val);
while (i--) {
if (!(TSI_READ(TSI108_MAC_MII_IND) & TSI108_MAC_MII_IND_BUSY))
return;
udelay(10);
}
printk(KERN_ERR "%s function time out\n", __func__);
}
static int mii_speed(struct mii_if_info *mii)
{
int advert, lpa, val, media;
int lpa2 = 0;
int speed;
if (!mii_link_ok(mii))
return 0;
val = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_BMSR);
if ((val & BMSR_ANEGCOMPLETE) == 0)
return 0;
advert = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_ADVERTISE);
lpa = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_LPA);
media = mii_nway_result(advert & lpa);
if (mii->supports_gmii)
lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000);
speed = lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 :
(media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? 100 : 10);
return speed;
}
static void tsi108_check_phy(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
u32 mac_cfg2_reg, portctrl_reg;
u32 duplex;
u32 speed;
unsigned long flags;
spin_lock_irqsave(&phy_lock, flags);
if (!data->phy_ok)
goto out;
duplex = mii_check_media(&data->mii_if, netif_msg_link(data), data->init_media);
data->init_media = 0;
if (netif_carrier_ok(dev)) {
speed = mii_speed(&data->mii_if);
if ((speed != data->speed) || duplex) {
mac_cfg2_reg = TSI_READ(TSI108_MAC_CFG2);
portctrl_reg = TSI_READ(TSI108_EC_PORTCTRL);
mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK;
if (speed == 1000) {
mac_cfg2_reg |= TSI108_MAC_CFG2_GIG;
portctrl_reg &= ~TSI108_EC_PORTCTRL_NOGIG;
} else {
mac_cfg2_reg |= TSI108_MAC_CFG2_NOGIG;
portctrl_reg |= TSI108_EC_PORTCTRL_NOGIG;
}
data->speed = speed;
if (data->mii_if.full_duplex) {
mac_cfg2_reg |= TSI108_MAC_CFG2_FULLDUPLEX;
portctrl_reg &= ~TSI108_EC_PORTCTRL_HALFDUPLEX;
data->duplex = 2;
} else {
mac_cfg2_reg &= ~TSI108_MAC_CFG2_FULLDUPLEX;
portctrl_reg |= TSI108_EC_PORTCTRL_HALFDUPLEX;
data->duplex = 1;
}
TSI_WRITE(TSI108_MAC_CFG2, mac_cfg2_reg);
TSI_WRITE(TSI108_EC_PORTCTRL, portctrl_reg);
}
if (data->link_up == 0) {
/* The manual says it can take 3-4 usecs for the speed change
* to take effect.
*/
udelay(5);
spin_lock(&data->txlock);
if (is_valid_ether_addr(dev->dev_addr) && data->txfree)
netif_wake_queue(dev);
data->link_up = 1;
spin_unlock(&data->txlock);
}
} else {
if (data->link_up == 1) {
netif_stop_queue(dev);
data->link_up = 0;
printk(KERN_NOTICE "%s : link is down\n", dev->name);
}
goto out;
}
out:
spin_unlock_irqrestore(&phy_lock, flags);
}
static inline void
tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
unsigned long *upper)
{
if (carry & carry_bit)
*upper += carry_shift;
}
static void tsi108_stat_carry(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
u32 carry1, carry2;
spin_lock_irq(&data->misclock);
carry1 = TSI_READ(TSI108_STAT_CARRY1);
carry2 = TSI_READ(TSI108_STAT_CARRY2);
TSI_WRITE(TSI108_STAT_CARRY1, carry1);
TSI_WRITE(TSI108_STAT_CARRY2, carry2);
tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXBYTES,
TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXPKTS,
TSI108_STAT_RXPKTS_CARRY,
&data->stats.rx_packets);
tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFCS,
TSI108_STAT_RXFCS_CARRY, &data->rx_fcs);
tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXMCAST,
TSI108_STAT_RXMCAST_CARRY,
&data->stats.multicast);
tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXALIGN,
TSI108_STAT_RXALIGN_CARRY,
&data->stats.rx_frame_errors);
tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXLENGTH,
TSI108_STAT_RXLENGTH_CARRY,
&data->stats.rx_length_errors);
tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXRUNT,
TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJUMBO,
TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFRAG,
TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJABBER,
TSI108_STAT_RXJABBER_CARRY, &data->rx_long_fcs);
tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXDROP,
TSI108_STAT_RXDROP_CARRY,
&data->stats.rx_missed_errors);
tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXBYTES,
TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPKTS,
TSI108_STAT_TXPKTS_CARRY,
&data->stats.tx_packets);
tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXDEF,
TSI108_STAT_TXEXDEF_CARRY,
&data->stats.tx_aborted_errors);
tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXCOL,
TSI108_STAT_TXEXCOL_CARRY, &data->tx_coll_abort);
tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXTCOL,
TSI108_STAT_TXTCOL_CARRY,
&data->stats.collisions);
tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPAUSE,
TSI108_STAT_TXPAUSEDROP_CARRY,
&data->tx_pause_drop);
spin_unlock_irq(&data->misclock);
}
/* Read a stat counter atomically with respect to carries.
* data->misclock must be held.
*/
static inline unsigned long
tsi108_read_stat(struct tsi108_prv_data * data, int reg, int carry_bit,
int carry_shift, unsigned long *upper)
{
int carryreg;
unsigned long val;
if (reg < 0xb0)
carryreg = TSI108_STAT_CARRY1;
else
carryreg = TSI108_STAT_CARRY2;
again:
val = TSI_READ(reg) | *upper;
/* Check to see if it overflowed, but the interrupt hasn't
* been serviced yet. If so, handle the carry here, and
* try again.
*/
if (unlikely(TSI_READ(carryreg) & carry_bit)) {
*upper += carry_shift;
TSI_WRITE(carryreg, carry_bit);
goto again;
}
return val;
}
static struct net_device_stats *tsi108_get_stats(struct net_device *dev)
{
unsigned long excol;
struct tsi108_prv_data *data = netdev_priv(dev);
spin_lock_irq(&data->misclock);
data->tmpstats.rx_packets =
tsi108_read_stat(data, TSI108_STAT_RXPKTS,
TSI108_STAT_CARRY1_RXPKTS,
TSI108_STAT_RXPKTS_CARRY, &data->stats.rx_packets);
data->tmpstats.tx_packets =
tsi108_read_stat(data, TSI108_STAT_TXPKTS,
TSI108_STAT_CARRY2_TXPKTS,
TSI108_STAT_TXPKTS_CARRY, &data->stats.tx_packets);
data->tmpstats.rx_bytes =
tsi108_read_stat(data, TSI108_STAT_RXBYTES,
TSI108_STAT_CARRY1_RXBYTES,
TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
data->tmpstats.tx_bytes =
tsi108_read_stat(data, TSI108_STAT_TXBYTES,
TSI108_STAT_CARRY2_TXBYTES,
TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
data->tmpstats.multicast =
tsi108_read_stat(data, TSI108_STAT_RXMCAST,
TSI108_STAT_CARRY1_RXMCAST,
TSI108_STAT_RXMCAST_CARRY, &data->stats.multicast);
excol = tsi108_read_stat(data, TSI108_STAT_TXEXCOL,
TSI108_STAT_CARRY2_TXEXCOL,
TSI108_STAT_TXEXCOL_CARRY,
&data->tx_coll_abort);
data->tmpstats.collisions =
tsi108_read_stat(data, TSI108_STAT_TXTCOL,
TSI108_STAT_CARRY2_TXTCOL,
TSI108_STAT_TXTCOL_CARRY, &data->stats.collisions);
data->tmpstats.collisions += excol;
data->tmpstats.rx_length_errors =
tsi108_read_stat(data, TSI108_STAT_RXLENGTH,
TSI108_STAT_CARRY1_RXLENGTH,
TSI108_STAT_RXLENGTH_CARRY,
&data->stats.rx_length_errors);
data->tmpstats.rx_length_errors +=
tsi108_read_stat(data, TSI108_STAT_RXRUNT,
TSI108_STAT_CARRY1_RXRUNT,
TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
data->tmpstats.rx_length_errors +=
tsi108_read_stat(data, TSI108_STAT_RXJUMBO,
TSI108_STAT_CARRY1_RXJUMBO,
TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
data->tmpstats.rx_frame_errors =
tsi108_read_stat(data, TSI108_STAT_RXALIGN,
TSI108_STAT_CARRY1_RXALIGN,
TSI108_STAT_RXALIGN_CARRY,
&data->stats.rx_frame_errors);
data->tmpstats.rx_frame_errors +=
tsi108_read_stat(data, TSI108_STAT_RXFCS,
TSI108_STAT_CARRY1_RXFCS, TSI108_STAT_RXFCS_CARRY,
&data->rx_fcs);
data->tmpstats.rx_frame_errors +=
tsi108_read_stat(data, TSI108_STAT_RXFRAG,
TSI108_STAT_CARRY1_RXFRAG,
TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
data->tmpstats.rx_missed_errors =
tsi108_read_stat(data, TSI108_STAT_RXDROP,
TSI108_STAT_CARRY1_RXDROP,
TSI108_STAT_RXDROP_CARRY,
&data->stats.rx_missed_errors);
/* These three are maintained by software. */
data->tmpstats.rx_fifo_errors = data->stats.rx_fifo_errors;
data->tmpstats.rx_crc_errors = data->stats.rx_crc_errors;
data->tmpstats.tx_aborted_errors =
tsi108_read_stat(data, TSI108_STAT_TXEXDEF,
TSI108_STAT_CARRY2_TXEXDEF,
TSI108_STAT_TXEXDEF_CARRY,
&data->stats.tx_aborted_errors);
data->tmpstats.tx_aborted_errors +=
tsi108_read_stat(data, TSI108_STAT_TXPAUSEDROP,
TSI108_STAT_CARRY2_TXPAUSE,
TSI108_STAT_TXPAUSEDROP_CARRY,
&data->tx_pause_drop);
data->tmpstats.tx_aborted_errors += excol;
data->tmpstats.tx_errors = data->tmpstats.tx_aborted_errors;
data->tmpstats.rx_errors = data->tmpstats.rx_length_errors +
data->tmpstats.rx_crc_errors +
data->tmpstats.rx_frame_errors +
data->tmpstats.rx_fifo_errors + data->tmpstats.rx_missed_errors;
spin_unlock_irq(&data->misclock);
return &data->tmpstats;
}
static void tsi108_restart_rx(struct tsi108_prv_data * data, struct net_device *dev)
{
TSI_WRITE(TSI108_EC_RXQ_PTRHIGH,
TSI108_EC_RXQ_PTRHIGH_VALID);
TSI_WRITE(TSI108_EC_RXCTRL, TSI108_EC_RXCTRL_GO
| TSI108_EC_RXCTRL_QUEUE0);
}
static void tsi108_restart_tx(struct tsi108_prv_data * data)
{
TSI_WRITE(TSI108_EC_TXQ_PTRHIGH,
TSI108_EC_TXQ_PTRHIGH_VALID);
TSI_WRITE(TSI108_EC_TXCTRL, TSI108_EC_TXCTRL_IDLEINT |
TSI108_EC_TXCTRL_GO | TSI108_EC_TXCTRL_QUEUE0);
}
/* txlock must be held by caller, with IRQs disabled, and
* with permission to re-enable them when the lock is dropped.
*/
static void tsi108_complete_tx(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
int tx;
struct sk_buff *skb;
int release = 0;
while (!data->txfree || data->txhead != data->txtail) {
tx = data->txtail;
if (data->txring[tx].misc & TSI108_TX_OWN)
break;
skb = data->txskbs[tx];
if (!(data->txring[tx].misc & TSI108_TX_OK))
printk("%s: bad tx packet, misc %x\n",
dev->name, data->txring[tx].misc);
data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
data->txfree++;
if (data->txring[tx].misc & TSI108_TX_EOF) {
dev_kfree_skb_any(skb);
release++;
}
}
if (release) {
if (is_valid_ether_addr(dev->dev_addr) && data->link_up)
netif_wake_queue(dev);
}
}
static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
int frags = skb_shinfo(skb)->nr_frags + 1;
int i;
if (!data->phy_ok && net_ratelimit())
printk(KERN_ERR "%s: Transmit while PHY is down!\n", dev->name);
if (!data->link_up) {
printk(KERN_ERR "%s: Transmit while link is down!\n",
dev->name);
netif_stop_queue(dev);
return NETDEV_TX_BUSY;
}
if (data->txfree < MAX_SKB_FRAGS + 1) {
netif_stop_queue(dev);
if (net_ratelimit())
printk(KERN_ERR "%s: Transmit with full tx ring!\n",
dev->name);
return NETDEV_TX_BUSY;
}
if (data->txfree - frags < MAX_SKB_FRAGS + 1) {
netif_stop_queue(dev);
}
spin_lock_irq(&data->txlock);
for (i = 0; i < frags; i++) {
int misc = 0;
int tx = data->txhead;
/* This is done to mark every TSI108_TX_INT_FREQ tx buffers with
* the interrupt bit. TX descriptor-complete interrupts are
* enabled when the queue fills up, and masked when there is
* still free space. This way, when saturating the outbound
* link, the tx interrupts are kept to a reasonable level.
* When the queue is not full, reclamation of skbs still occurs
* as new packets are transmitted, or on a queue-empty
* interrupt.
*/
if ((tx % TSI108_TX_INT_FREQ == 0) &&
((TSI108_TXRING_LEN - data->txfree) >= TSI108_TX_INT_FREQ))
misc = TSI108_TX_INT;
data->txskbs[tx] = skb;
if (i == 0) {
data->txring[tx].buf0 = dma_map_single(NULL, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
data->txring[tx].len = skb_headlen(skb);
misc |= TSI108_TX_SOF;
} else {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
data->txring[tx].buf0 =
dma_map_page(NULL, frag->page, frag->page_offset,
frag->size, DMA_TO_DEVICE);
data->txring[tx].len = frag->size;
}
if (i == frags - 1)
misc |= TSI108_TX_EOF;
if (netif_msg_pktdata(data)) {
int i;
printk("%s: Tx Frame contents (%d)\n", dev->name,
skb->len);
for (i = 0; i < skb->len; i++)
printk(" %2.2x", skb->data[i]);
printk(".\n");
}
data->txring[tx].misc = misc | TSI108_TX_OWN;
data->txhead = (data->txhead + 1) % TSI108_TXRING_LEN;
data->txfree--;
}
tsi108_complete_tx(dev);
/* This must be done after the check for completed tx descriptors,
* so that the tail pointer is correct.
*/
if (!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_QUEUE0))
tsi108_restart_tx(data);
spin_unlock_irq(&data->txlock);
return NETDEV_TX_OK;
}
static int tsi108_complete_rx(struct net_device *dev, int budget)
{
struct tsi108_prv_data *data = netdev_priv(dev);
int done = 0;
while (data->rxfree && done != budget) {
int rx = data->rxtail;
struct sk_buff *skb;
if (data->rxring[rx].misc & TSI108_RX_OWN)
break;
skb = data->rxskbs[rx];
data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
data->rxfree--;
done++;
if (data->rxring[rx].misc & TSI108_RX_BAD) {
spin_lock_irq(&data->misclock);
if (data->rxring[rx].misc & TSI108_RX_CRC)
data->stats.rx_crc_errors++;
if (data->rxring[rx].misc & TSI108_RX_OVER)
data->stats.rx_fifo_errors++;
spin_unlock_irq(&data->misclock);
dev_kfree_skb_any(skb);
continue;
}
if (netif_msg_pktdata(data)) {
int i;
printk("%s: Rx Frame contents (%d)\n",
dev->name, data->rxring[rx].len);
for (i = 0; i < data->rxring[rx].len; i++)
printk(" %2.2x", skb->data[i]);
printk(".\n");
}
skb_put(skb, data->rxring[rx].len);
skb->protocol = eth_type_trans(skb, dev);
netif_receive_skb(skb);
}
return done;
}
static int tsi108_refill_rx(struct net_device *dev, int budget)
{
struct tsi108_prv_data *data = netdev_priv(dev);
int done = 0;
while (data->rxfree != TSI108_RXRING_LEN && done != budget) {
int rx = data->rxhead;
struct sk_buff *skb;
skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
data->rxskbs[rx] = skb;
if (!skb)
break;
data->rxring[rx].buf0 = dma_map_single(NULL, skb->data,
TSI108_RX_SKB_SIZE,
DMA_FROM_DEVICE);
/* Sometimes the hardware sets blen to zero after packet
* reception, even though the manual says that it's only ever
* modified by the driver.
*/
data->rxring[rx].blen = TSI108_RX_SKB_SIZE;
data->rxring[rx].misc = TSI108_RX_OWN | TSI108_RX_INT;
data->rxhead = (data->rxhead + 1) % TSI108_RXRING_LEN;
data->rxfree++;
done++;
}
if (done != 0 && !(TSI_READ(TSI108_EC_RXSTAT) &
TSI108_EC_RXSTAT_QUEUE0))
tsi108_restart_rx(data, dev);
return done;
}
static int tsi108_poll(struct napi_struct *napi, int budget)
{
struct tsi108_prv_data *data = container_of(napi, struct tsi108_prv_data, napi);
struct net_device *dev = data->dev;
u32 estat = TSI_READ(TSI108_EC_RXESTAT);
u32 intstat = TSI_READ(TSI108_EC_INTSTAT);
int num_received = 0, num_filled = 0;
intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT;
TSI_WRITE(TSI108_EC_RXESTAT, estat);
TSI_WRITE(TSI108_EC_INTSTAT, intstat);
if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT))
num_received = tsi108_complete_rx(dev, budget);
/* This should normally fill no more slots than the number of
* packets received in tsi108_complete_rx(). The exception
* is when we previously ran out of memory for RX SKBs. In that
* case, it's helpful to obey the budget, not only so that the
* CPU isn't hogged, but so that memory (which may still be low)
* is not hogged by one device.
*
* A work unit is considered to be two SKBs to allow us to catch
* up when the ring has shrunk due to out-of-memory but we're
* still removing the full budget's worth of packets each time.
*/
if (data->rxfree < TSI108_RXRING_LEN)
num_filled = tsi108_refill_rx(dev, budget * 2);
if (intstat & TSI108_INT_RXERROR) {
u32 err = TSI_READ(TSI108_EC_RXERR);
TSI_WRITE(TSI108_EC_RXERR, err);
if (err) {
if (net_ratelimit())
printk(KERN_DEBUG "%s: RX error %x\n",
dev->name, err);
if (!(TSI_READ(TSI108_EC_RXSTAT) &
TSI108_EC_RXSTAT_QUEUE0))
tsi108_restart_rx(data, dev);
}
}
if (intstat & TSI108_INT_RXOVERRUN) {
spin_lock_irq(&data->misclock);
data->stats.rx_fifo_errors++;
spin_unlock_irq(&data->misclock);
}
if (num_received < budget) {
data->rxpending = 0;
napi_complete(napi);
TSI_WRITE(TSI108_EC_INTMASK,
TSI_READ(TSI108_EC_INTMASK)
& ~(TSI108_INT_RXQUEUE0
| TSI108_INT_RXTHRESH |
TSI108_INT_RXOVERRUN |
TSI108_INT_RXERROR |
TSI108_INT_RXWAIT));
} else {
data->rxpending = 1;
}
return num_received;
}
static void tsi108_rx_int(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
/* A race could cause dev to already be scheduled, so it's not an
* error if that happens (and interrupts shouldn't be re-masked,
* because that can cause harmful races if poll has already
* unmasked them but not cleared LINK_STATE_SCHED).
*
* This can happen if this code races with tsi108_poll(), which masks
* the interrupts after tsi108_irq_one() read the mask, but before
* napi_schedule is called. It could also happen due to calls
* from tsi108_check_rxring().
*/
if (napi_schedule_prep(&data->napi)) {
/* Mask, rather than ack, the receive interrupts. The ack
* will happen in tsi108_poll().
*/
TSI_WRITE(TSI108_EC_INTMASK,
TSI_READ(TSI108_EC_INTMASK) |
TSI108_INT_RXQUEUE0
| TSI108_INT_RXTHRESH |
TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR |
TSI108_INT_RXWAIT);
__napi_schedule(&data->napi);
} else {
if (!netif_running(dev)) {
/* This can happen if an interrupt occurs while the
* interface is being brought down, as the START
* bit is cleared before the stop function is called.
*
* In this case, the interrupts must be masked, or
* they will continue indefinitely.
*
* There's a race here if the interface is brought down
* and then up in rapid succession, as the device could
* be made running after the above check and before
* the masking below. This will only happen if the IRQ
* thread has a lower priority than the task bringing
* up the interface. Fixing this race would likely
* require changes in generic code.
*/
TSI_WRITE(TSI108_EC_INTMASK,
TSI_READ
(TSI108_EC_INTMASK) |
TSI108_INT_RXQUEUE0 |
TSI108_INT_RXTHRESH |
TSI108_INT_RXOVERRUN |
TSI108_INT_RXERROR |
TSI108_INT_RXWAIT);
}
}
}
/* If the RX ring has run out of memory, try periodically
* to allocate some more, as otherwise poll would never
* get called (apart from the initial end-of-queue condition).
*
* This is called every CHECK_PHY_INTERVAL (half a second by default) from the timer.
*/
static void tsi108_check_rxring(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
/* A poll is scheduled, as opposed to calling tsi108_refill_rx
* directly, so as to keep the receive path single-threaded
* (and thus not needing a lock).
*/
if (netif_running(dev) && data->rxfree < TSI108_RXRING_LEN / 4)
tsi108_rx_int(dev);
}
static void tsi108_tx_int(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
u32 estat = TSI_READ(TSI108_EC_TXESTAT);
TSI_WRITE(TSI108_EC_TXESTAT, estat);
TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_TXQUEUE0 |
TSI108_INT_TXIDLE | TSI108_INT_TXERROR);
if (estat & TSI108_EC_TXESTAT_Q0_ERR) {
u32 err = TSI_READ(TSI108_EC_TXERR);
TSI_WRITE(TSI108_EC_TXERR, err);
if (err && net_ratelimit())
printk(KERN_ERR "%s: TX error %x\n", dev->name, err);
}
if (estat & (TSI108_EC_TXESTAT_Q0_DESCINT | TSI108_EC_TXESTAT_Q0_EOQ)) {
spin_lock(&data->txlock);
tsi108_complete_tx(dev);
spin_unlock(&data->txlock);
}
}
static irqreturn_t tsi108_irq(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct tsi108_prv_data *data = netdev_priv(dev);
u32 stat = TSI_READ(TSI108_EC_INTSTAT);
if (!(stat & TSI108_INT_ANY))
return IRQ_NONE; /* Not our interrupt */
stat &= ~TSI_READ(TSI108_EC_INTMASK);
if (stat & (TSI108_INT_TXQUEUE0 | TSI108_INT_TXIDLE |
TSI108_INT_TXERROR))
tsi108_tx_int(dev);
if (stat & (TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
TSI108_INT_RXWAIT | TSI108_INT_RXOVERRUN |
TSI108_INT_RXERROR))
tsi108_rx_int(dev);
if (stat & TSI108_INT_SFN) {
if (net_ratelimit())
printk(KERN_DEBUG "%s: SFN error\n", dev->name);
TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_SFN);
}
if (stat & TSI108_INT_STATCARRY) {
tsi108_stat_carry(dev);
TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_STATCARRY);
}
return IRQ_HANDLED;
}
static void tsi108_stop_ethernet(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
int i = 1000;
/* Disable all TX and RX queues ... */
TSI_WRITE(TSI108_EC_TXCTRL, 0);
TSI_WRITE(TSI108_EC_RXCTRL, 0);
/* ...and wait for them to become idle */
while (i--) {
if (!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_ACTIVE))
break;
udelay(10);
}
i = 1000;
while (i--) {
if (!(TSI_READ(TSI108_EC_RXSTAT) & TSI108_EC_RXSTAT_ACTIVE))
return;
udelay(10);
}
printk(KERN_ERR "%s function time out\n", __func__);
}
static void tsi108_reset_ether(struct tsi108_prv_data * data)
{
TSI_WRITE(TSI108_MAC_CFG1, TSI108_MAC_CFG1_SOFTRST);
udelay(100);
TSI_WRITE(TSI108_MAC_CFG1, 0);
TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATRST);
udelay(100);
TSI_WRITE(TSI108_EC_PORTCTRL,
TSI_READ(TSI108_EC_PORTCTRL) &
~TSI108_EC_PORTCTRL_STATRST);
TSI_WRITE(TSI108_EC_TXCFG, TSI108_EC_TXCFG_RST);
udelay(100);
TSI_WRITE(TSI108_EC_TXCFG,
TSI_READ(TSI108_EC_TXCFG) &
~TSI108_EC_TXCFG_RST);
TSI_WRITE(TSI108_EC_RXCFG, TSI108_EC_RXCFG_RST);
udelay(100);
TSI_WRITE(TSI108_EC_RXCFG,
TSI_READ(TSI108_EC_RXCFG) &
~TSI108_EC_RXCFG_RST);
TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
TSI_READ(TSI108_MAC_MII_MGMT_CFG) |
TSI108_MAC_MII_MGMT_RST);
udelay(100);
TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
(TSI_READ(TSI108_MAC_MII_MGMT_CFG) &
~(TSI108_MAC_MII_MGMT_RST |
TSI108_MAC_MII_MGMT_CLK)) | 0x07);
}
static int tsi108_get_mac(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
u32 word1 = TSI_READ(TSI108_MAC_ADDR1);
u32 word2 = TSI_READ(TSI108_MAC_ADDR2);
/* Note that the octets are reversed from what the manual says,
* producing an even weirder ordering...
*/
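/* Worked example (illustrative): the fallback address
 * 00:06:d2:00:00:01 assigned below packs as word2 = 0x06000000
 * and word1 = 0x010000d2 under this layout.
 */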
if (word2 == 0 && word1 == 0) {
dev->dev_addr[0] = 0x00;
dev->dev_addr[1] = 0x06;
dev->dev_addr[2] = 0xd2;
dev->dev_addr[3] = 0x00;
dev->dev_addr[4] = 0x00;
if (0x8 == data->phy)
dev->dev_addr[5] = 0x01;
else
dev->dev_addr[5] = 0x02;
word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
(dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
TSI_WRITE(TSI108_MAC_ADDR1, word1);
TSI_WRITE(TSI108_MAC_ADDR2, word2);
} else {
dev->dev_addr[0] = (word2 >> 16) & 0xff;
dev->dev_addr[1] = (word2 >> 24) & 0xff;
dev->dev_addr[2] = (word1 >> 0) & 0xff;
dev->dev_addr[3] = (word1 >> 8) & 0xff;
dev->dev_addr[4] = (word1 >> 16) & 0xff;
dev->dev_addr[5] = (word1 >> 24) & 0xff;
}
if (!is_valid_ether_addr(dev->dev_addr)) {
printk(KERN_ERR
"%s: Invalid MAC address. word1: %08x, word2: %08x\n",
dev->name, word1, word2);
return -EINVAL;
}
return 0;
}
static int tsi108_set_mac(struct net_device *dev, void *addr)
{
struct tsi108_prv_data *data = netdev_priv(dev);
u32 word1, word2;
int i;
if (!is_valid_ether_addr(addr))
return -EINVAL;
for (i = 0; i < 6; i++)
/* +2 is for the offset of the HW addr type */
dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
(dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
spin_lock_irq(&data->misclock);
TSI_WRITE(TSI108_MAC_ADDR1, word1);
TSI_WRITE(TSI108_MAC_ADDR2, word2);
spin_lock(&data->txlock);
if (data->txfree && data->link_up)
netif_wake_queue(dev);
spin_unlock(&data->txlock);
spin_unlock_irq(&data->misclock);
return 0;
}
/* Protected by dev->xmit_lock. */
static void tsi108_set_rx_mode(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
u32 rxcfg = TSI_READ(TSI108_EC_RXCFG);
if (dev->flags & IFF_PROMISC) {
rxcfg &= ~(TSI108_EC_RXCFG_UC_HASH | TSI108_EC_RXCFG_MC_HASH);
rxcfg |= TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE;
goto out;
}
rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);
if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
int i;
struct netdev_hw_addr *ha;
rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
memset(data->mc_hash, 0, sizeof(data->mc_hash));
netdev_for_each_mc_addr(ha, dev) {
u32 hash, crc;
crc = ether_crc(6, ha->addr);
/* the top 9 bits of the CRC index the 512-bit multicast hash */
hash = crc >> 23;
__set_bit(hash, &data->mc_hash[0]);
}
TSI_WRITE(TSI108_EC_HASHADDR,
TSI108_EC_HASHADDR_AUTOINC |
TSI108_EC_HASHADDR_MCAST);
for (i = 0; i < 16; i++) {
/* The manual says that the hardware may drop
* back-to-back writes to the data register.
*/
udelay(1);
TSI_WRITE(TSI108_EC_HASHDATA,
data->mc_hash[i]);
}
}
out:
TSI_WRITE(TSI108_EC_RXCFG, rxcfg);
}
static void tsi108_init_phy(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
u32 i = 1000; /* bounded retry count for the BMCR reset poll */
u16 phyval = 0;
unsigned long flags;
spin_lock_irqsave(&phy_lock, flags);
tsi108_write_mii(data, MII_BMCR, BMCR_RESET);
while (--i) {
if (!(tsi108_read_mii(data, MII_BMCR) & BMCR_RESET))
break;
udelay(10);
}
if (i == 0)
printk(KERN_ERR "%s function time out\n", __func__);
if (data->phy_type == TSI108_PHY_BCM54XX) {
tsi108_write_mii(data, 0x09, 0x0300);
tsi108_write_mii(data, 0x10, 0x1020);
tsi108_write_mii(data, 0x1c, 0x8c00);
}
tsi108_write_mii(data,
MII_BMCR,
BMCR_ANENABLE | BMCR_ANRESTART);
while (tsi108_read_mii(data, MII_BMCR) & BMCR_ANRESTART)
cpu_relax();
/* Set G/MII mode and receive clock select in TBI control #2. The
* second port won't work if this isn't done, even though we don't
* use TBI mode.
*/
tsi108_write_tbi(data, 0x11, 0x30);
/* FIXME: It seems to take more than 2 back-to-back reads to the
* PHY_STAT register before the link up status bit is set.
*/
data->link_up = 0;
i = 0; /* reuse i to bound the link-status wait below */
while (!((phyval = tsi108_read_mii(data, MII_BMSR)) &
BMSR_LSTATUS)) {
if (i++ > (MII_READ_DELAY / 10)) {
break;
}
spin_unlock_irqrestore(&phy_lock, flags);
msleep(10);
spin_lock_irqsave(&phy_lock, flags);
}
data->mii_if.supports_gmii = mii_check_gmii_support(&data->mii_if);
printk(KERN_DEBUG "PHY_STAT reg contains %08x\n", phyval);
data->phy_ok = 1;
data->init_media = 1;
spin_unlock_irqrestore(&phy_lock, flags);
}
static void tsi108_kill_phy(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
unsigned long flags;
spin_lock_irqsave(&phy_lock, flags);
tsi108_write_mii(data, MII_BMCR, BMCR_PDOWN);
data->phy_ok = 0;
spin_unlock_irqrestore(&phy_lock, flags);
}
static int tsi108_open(struct net_device *dev)
{
int i;
struct tsi108_prv_data *data = netdev_priv(dev);
unsigned int rxring_size = TSI108_RXRING_LEN * sizeof(rx_desc);
unsigned int txring_size = TSI108_TXRING_LEN * sizeof(tx_desc);
i = request_irq(data->irq_num, tsi108_irq, 0, dev->name, dev);
if (i != 0) {
printk(KERN_ERR "tsi108_eth%d: Could not allocate IRQ%d.\n",
data->id, data->irq_num);
return i;
} else {
dev->irq = data->irq_num;
printk(KERN_NOTICE
"tsi108_open : Port %d Assigned IRQ %d to %s\n",
data->id, dev->irq, dev->name);
}
data->rxring = dma_alloc_coherent(NULL, rxring_size,
&data->rxdma, GFP_KERNEL);
if (!data->rxring) {
printk(KERN_DEBUG
"TSI108_ETH: failed to allocate memory for rxring!\n");
return -ENOMEM;
} else {
memset(data->rxring, 0, rxring_size);
}
data->txring = dma_alloc_coherent(NULL, txring_size,
&data->txdma, GFP_KERNEL);
if (!data->txring) {
printk(KERN_DEBUG
"TSI108_ETH: failed to allocate memory for txring!\n");
dma_free_coherent(NULL, rxring_size, data->rxring, data->rxdma);
return -ENOMEM;
} else {
memset(data->txring, 0, txring_size);
}
for (i = 0; i < TSI108_RXRING_LEN; i++) {
data->rxring[i].next0 = data->rxdma + (i + 1) * sizeof(rx_desc);
data->rxring[i].blen = TSI108_RXBUF_SIZE;
data->rxring[i].vlan = 0;
}
data->rxring[TSI108_RXRING_LEN - 1].next0 = data->rxdma;
data->rxtail = 0;
data->rxhead = 0;
for (i = 0; i < TSI108_RXRING_LEN; i++) {
struct sk_buff *skb;
skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
if (!skb) {
/* Bah. No memory for now, but maybe we'll get
* some more later.
* For now, we'll live with the smaller ring.
*/
printk(KERN_WARNING
"%s: Could only allocate %d receive skb(s).\n",
dev->name, i);
data->rxhead = i;
break;
}
data->rxskbs[i] = skb;
data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
}
data->rxfree = i;
TSI_WRITE(TSI108_EC_RXQ_PTRLOW, data->rxdma);
for (i = 0; i < TSI108_TXRING_LEN; i++) {
data->txring[i].next0 = data->txdma + (i + 1) * sizeof(tx_desc);
data->txring[i].misc = 0;
}
data->txring[TSI108_TXRING_LEN - 1].next0 = data->txdma;
data->txtail = 0;
data->txhead = 0;
data->txfree = TSI108_TXRING_LEN;
TSI_WRITE(TSI108_EC_TXQ_PTRLOW, data->txdma);
tsi108_init_phy(dev);
napi_enable(&data->napi);
setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev);
mod_timer(&data->timer, jiffies + 1);
tsi108_restart_rx(data, dev);
TSI_WRITE(TSI108_EC_INTSTAT, ~0);
TSI_WRITE(TSI108_EC_INTMASK,
~(TSI108_INT_TXQUEUE0 | TSI108_INT_RXERROR |
TSI108_INT_RXTHRESH | TSI108_INT_RXQUEUE0 |
TSI108_INT_RXOVERRUN | TSI108_INT_RXWAIT |
TSI108_INT_SFN | TSI108_INT_STATCARRY));
TSI_WRITE(TSI108_MAC_CFG1,
TSI108_MAC_CFG1_RXEN | TSI108_MAC_CFG1_TXEN);
netif_start_queue(dev);
return 0;
}
static int tsi108_close(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
netif_stop_queue(dev);
napi_disable(&data->napi);
del_timer_sync(&data->timer);
tsi108_stop_ethernet(dev);
tsi108_kill_phy(dev);
TSI_WRITE(TSI108_EC_INTMASK, ~0);
TSI_WRITE(TSI108_MAC_CFG1, 0);
/* Check for any pending TX packets, and drop them. */
while (!data->txfree || data->txhead != data->txtail) {
int tx = data->txtail;
struct sk_buff *skb;
skb = data->txskbs[tx];
data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
data->txfree++;
dev_kfree_skb(skb);
}
free_irq(data->irq_num, dev);
/* Discard the RX ring. */
while (data->rxfree) {
int rx = data->rxtail;
struct sk_buff *skb;
skb = data->rxskbs[rx];
data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
data->rxfree--;
dev_kfree_skb(skb);
}
dma_free_coherent(NULL,
TSI108_RXRING_LEN * sizeof(rx_desc),
data->rxring, data->rxdma);
dma_free_coherent(NULL,
TSI108_TXRING_LEN * sizeof(tx_desc),
data->txring, data->txdma);
return 0;
}
static void tsi108_init_mac(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
TSI_WRITE(TSI108_MAC_CFG2, TSI108_MAC_CFG2_DFLT_PREAMBLE |
TSI108_MAC_CFG2_PADCRC);
TSI_WRITE(TSI108_EC_TXTHRESH,
(192 << TSI108_EC_TXTHRESH_STARTFILL) |
(192 << TSI108_EC_TXTHRESH_STOPFILL));
TSI_WRITE(TSI108_STAT_CARRYMASK1,
~(TSI108_STAT_CARRY1_RXBYTES |
TSI108_STAT_CARRY1_RXPKTS |
TSI108_STAT_CARRY1_RXFCS |
TSI108_STAT_CARRY1_RXMCAST |
TSI108_STAT_CARRY1_RXALIGN |
TSI108_STAT_CARRY1_RXLENGTH |
TSI108_STAT_CARRY1_RXRUNT |
TSI108_STAT_CARRY1_RXJUMBO |
TSI108_STAT_CARRY1_RXFRAG |
TSI108_STAT_CARRY1_RXJABBER |
TSI108_STAT_CARRY1_RXDROP));
TSI_WRITE(TSI108_STAT_CARRYMASK2,
~(TSI108_STAT_CARRY2_TXBYTES |
TSI108_STAT_CARRY2_TXPKTS |
TSI108_STAT_CARRY2_TXEXDEF |
TSI108_STAT_CARRY2_TXEXCOL |
TSI108_STAT_CARRY2_TXTCOL |
TSI108_STAT_CARRY2_TXPAUSE));
TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATEN);
TSI_WRITE(TSI108_MAC_CFG1, 0);
TSI_WRITE(TSI108_EC_RXCFG,
TSI108_EC_RXCFG_SE | TSI108_EC_RXCFG_BFE);
TSI_WRITE(TSI108_EC_TXQ_CFG, TSI108_EC_TXQ_CFG_DESC_INT |
TSI108_EC_TXQ_CFG_EOQ_OWN_INT |
TSI108_EC_TXQ_CFG_WSWP | (TSI108_PBM_PORT <<
TSI108_EC_TXQ_CFG_SFNPORT));
TSI_WRITE(TSI108_EC_RXQ_CFG, TSI108_EC_RXQ_CFG_DESC_INT |
TSI108_EC_RXQ_CFG_EOQ_OWN_INT |
TSI108_EC_RXQ_CFG_WSWP | (TSI108_PBM_PORT <<
TSI108_EC_RXQ_CFG_SFNPORT));
TSI_WRITE(TSI108_EC_TXQ_BUFCFG,
TSI108_EC_TXQ_BUFCFG_BURST256 |
TSI108_EC_TXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
TSI108_EC_TXQ_BUFCFG_SFNPORT));
TSI_WRITE(TSI108_EC_RXQ_BUFCFG,
TSI108_EC_RXQ_BUFCFG_BURST256 |
TSI108_EC_RXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
TSI108_EC_RXQ_BUFCFG_SFNPORT));
TSI_WRITE(TSI108_EC_INTMASK, ~0);
}
static int tsi108_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct tsi108_prv_data *data = netdev_priv(dev);
unsigned long flags;
int rc;
spin_lock_irqsave(&data->txlock, flags);
rc = mii_ethtool_gset(&data->mii_if, cmd);
spin_unlock_irqrestore(&data->txlock, flags);
return rc;
}
static int tsi108_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct tsi108_prv_data *data = netdev_priv(dev);
unsigned long flags;
int rc;
spin_lock_irqsave(&data->txlock, flags);
rc = mii_ethtool_sset(&data->mii_if, cmd);
spin_unlock_irqrestore(&data->txlock, flags);
return rc;
}
static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct tsi108_prv_data *data = netdev_priv(dev);
if (!netif_running(dev))
return -EINVAL;
return generic_mii_ioctl(&data->mii_if, if_mii(rq), cmd, NULL);
}
static const struct ethtool_ops tsi108_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_settings = tsi108_get_settings,
.set_settings = tsi108_set_settings,
};
static const struct net_device_ops tsi108_netdev_ops = {
.ndo_open = tsi108_open,
.ndo_stop = tsi108_close,
.ndo_start_xmit = tsi108_send_packet,
.ndo_set_multicast_list = tsi108_set_rx_mode,
.ndo_get_stats = tsi108_get_stats,
.ndo_do_ioctl = tsi108_do_ioctl,
.ndo_set_mac_address = tsi108_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
};
static int
tsi108_init_one(struct platform_device *pdev)
{
struct net_device *dev = NULL;
struct tsi108_prv_data *data = NULL;
hw_info *einfo;
int err = 0;
einfo = pdev->dev.platform_data;
if (NULL == einfo) {
printk(KERN_ERR "tsi-eth %d: Missing additional data!\n",
pdev->id);
return -ENODEV;
}
/* Create an ethernet device instance */
dev = alloc_etherdev(sizeof(struct tsi108_prv_data));
if (!dev) {
printk("tsi108_eth: Could not allocate a device structure\n");
return -ENOMEM;
}
printk("tsi108_eth%d: probe...\n", pdev->id);
data = netdev_priv(dev);
data->dev = dev;
pr_debug("tsi108_eth%d:regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n",
pdev->id, einfo->regs, einfo->phyregs,
einfo->phy, einfo->irq_num);
data->regs = ioremap(einfo->regs, 0x400);
if (NULL == data->regs) {
err = -ENOMEM;
goto regs_fail;
}
data->phyregs = ioremap(einfo->phyregs, 0x400);
if (NULL == data->phyregs) {
err = -ENOMEM;
goto regs_fail;
}
/* MII setup */
data->mii_if.dev = dev;
data->mii_if.mdio_read = tsi108_mdio_read;
data->mii_if.mdio_write = tsi108_mdio_write;
data->mii_if.phy_id = einfo->phy;
data->mii_if.phy_id_mask = 0x1f;
data->mii_if.reg_num_mask = 0x1f;
data->phy = einfo->phy;
data->phy_type = einfo->phy_type;
data->irq_num = einfo->irq_num;
data->id = pdev->id;
netif_napi_add(dev, &data->napi, tsi108_poll, 64);
dev->netdev_ops = &tsi108_netdev_ops;
dev->ethtool_ops = &tsi108_ethtool_ops;
/* Apparently, the Linux networking code won't use scatter-gather
* if the hardware doesn't do checksums. However, it's faster
* to checksum in place and use SG, as (among other reasons)
* the cache won't be dirtied (which then has to be flushed
* before DMA). The checksumming is done by the driver (via
* a new function skb_csum_dev() in net/core/skbuff.c).
*/
dev->features = NETIF_F_HIGHDMA;
spin_lock_init(&data->txlock);
spin_lock_init(&data->misclock);
tsi108_reset_ether(data);
tsi108_kill_phy(dev);
if ((err = tsi108_get_mac(dev)) != 0) {
printk(KERN_ERR "%s: Invalid MAC address. Please correct.\n",
dev->name);
goto register_fail;
}
tsi108_init_mac(dev);
err = register_netdev(dev);
if (err) {
printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
dev->name);
goto register_fail;
}
platform_set_drvdata(pdev, dev);
printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: %pM\n",
dev->name, dev->dev_addr);
#ifdef DEBUG
data->msg_enable = DEBUG;
dump_eth_one(dev);
#endif
return 0;
register_fail:
iounmap(data->regs);
iounmap(data->phyregs);
regs_fail:
free_netdev(dev);
return err;
}
/* There's no way to either get interrupts from the PHY when
* something changes, or to have the Tsi108 automatically communicate
* with the PHY to reconfigure itself.
*
* Thus, we have to do it using a timer.
*/
static void tsi108_timed_checker(unsigned long dev_ptr)
{
struct net_device *dev = (struct net_device *)dev_ptr;
struct tsi108_prv_data *data = netdev_priv(dev);
tsi108_check_phy(dev);
tsi108_check_rxring(dev);
mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
}
static int tsi108_ether_init(void)
{
int ret;
ret = platform_driver_register(&tsi_eth_driver);
if (ret < 0) {
printk(KERN_ERR "tsi108_ether_init: error initializing ethernet "
"device\n");
return ret;
}
return 0;
}
static int tsi108_ether_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct tsi108_prv_data *priv = netdev_priv(dev);
unregister_netdev(dev);
tsi108_stop_ethernet(dev);
platform_set_drvdata(pdev, NULL);
iounmap(priv->regs);
iounmap(priv->phyregs);
free_netdev(dev);
return 0;
}
static void tsi108_ether_exit(void)
{
platform_driver_unregister(&tsi_eth_driver);
}
module_init(tsi108_ether_init);
module_exit(tsi108_ether_exit);
MODULE_AUTHOR("Tundra Semiconductor Corporation");
MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:tsi-ethernet");
| gpl-2.0 |
cphelps76/DEMENTED_kernel_grouper | drivers/usb/host/ehci-omap.c | 388 | 9280 | /*
* ehci-omap.c - driver for USBHOST on OMAP3/4 processors
*
* Bus Glue for the EHCI controllers in OMAP3/4
* Tested on several OMAP3 boards, and OMAP4 Pandaboard
*
* Copyright (C) 2007-2011 Texas Instruments, Inc.
* Author: Vikram Pandita <vikram.pandita@ti.com>
* Author: Anand Gadiyar <gadiyar@ti.com>
* Author: Keshava Munegowda <keshava_mgowda@ti.com>
*
* Copyright (C) 2009 Nokia Corporation
* Contact: Felipe Balbi <felipe.balbi@nokia.com>
*
* Based on "ehci-fsl.c" and "ehci-au1xxx.c" ehci glue layers
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* TODO (last updated Feb 27, 2010):
* - add kernel-doc
* - enable AUTOIDLE
* - add suspend/resume
* - add HSIC and TLL support
* - convert to use hwmod and runtime PM
*/
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb/ulpi.h>
#include <plat/usb.h>
#include <linux/regulator/consumer.h>
/* EHCI Register Set */
#define EHCI_INSNREG04 (0xA0)
#define EHCI_INSNREG04_DISABLE_UNSUSPEND (1 << 5)
#define EHCI_INSNREG05_ULPI (0xA4)
#define EHCI_INSNREG05_ULPI_CONTROL_SHIFT 31
#define EHCI_INSNREG05_ULPI_PORTSEL_SHIFT 24
#define EHCI_INSNREG05_ULPI_OPSEL_SHIFT 22
#define EHCI_INSNREG05_ULPI_REGADD_SHIFT 16
#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8
#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0
/*-------------------------------------------------------------------------*/
static const struct hc_driver ehci_omap_hc_driver;
static inline void ehci_write(void __iomem *base, u32 reg, u32 val)
{
__raw_writel(val, base + reg);
}
static inline u32 ehci_read(void __iomem *base, u32 reg)
{
return __raw_readl(base + reg);
}
static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
{
struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev);
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
unsigned reg = 0;
reg = ULPI_FUNC_CTRL_RESET
/* FUNCTION_CTRL_SET register */
| (ULPI_SET(ULPI_FUNC_CTRL) << EHCI_INSNREG05_ULPI_REGADD_SHIFT)
/* Write */
| (2 << EHCI_INSNREG05_ULPI_OPSEL_SHIFT)
/* PORTn */
| ((port + 1) << EHCI_INSNREG05_ULPI_PORTSEL_SHIFT)
/* start ULPI access*/
| (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT);
ehci_write(hcd->regs, EHCI_INSNREG05_ULPI, reg);
/* Wait for ULPI access completion */
while ((ehci_read(hcd->regs, EHCI_INSNREG05_ULPI)
& (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT))) {
cpu_relax();
if (time_after(jiffies, timeout)) {
dev_dbg(&pdev->dev, "phy reset operation timed out\n");
break;
}
}
}
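/* Illustrative sketch, not part of the original file: a ULPI register
 * read could be composed the same way as the reset above. The opsel
 * value of 3 for reads and the result landing in the low byte of
 * INSNREG05 are assumptions based on the viewport layout; a production
 * version would also bound the completion wait like the reset helper.
 */
static u8 __maybe_unused omap_ehci_ulpi_read(struct usb_hcd *hcd,
u8 port, u8 addr)
{
u32 reg = (addr << EHCI_INSNREG05_ULPI_REGADD_SHIFT)
/* Read */
| (3 << EHCI_INSNREG05_ULPI_OPSEL_SHIFT)
/* PORTn */
| ((port + 1) << EHCI_INSNREG05_ULPI_PORTSEL_SHIFT)
/* start ULPI access */
| (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT);
ehci_write(hcd->regs, EHCI_INSNREG05_ULPI, reg);
/* wait for ULPI access completion */
while (ehci_read(hcd->regs, EHCI_INSNREG05_ULPI)
& (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT))
cpu_relax();
return ehci_read(hcd->regs, EHCI_INSNREG05_ULPI) & 0xff;
}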
static void disable_put_regulator(
struct ehci_hcd_omap_platform_data *pdata)
{
int i;
for (i = 0; i < OMAP3_HS_USB_PORTS; i++) {
if (pdata->regulator[i]) {
regulator_disable(pdata->regulator[i]);
regulator_put(pdata->regulator[i]);
}
}
}
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */
/**
* ehci_hcd_omap_probe - initialize TI-based HCDs
*
* Allocates basic resources for this USB host controller, and
* then invokes the start() method for the HCD associated with it
* through the hotplug entry's driver_data.
*/
static int ehci_hcd_omap_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ehci_hcd_omap_platform_data *pdata = dev->platform_data;
struct resource *res;
struct usb_hcd *hcd;
void __iomem *regs;
struct ehci_hcd *omap_ehci;
int ret = -ENODEV;
int irq;
int i;
char supply[7];
if (usb_disabled())
return -ENODEV;
if (!dev->parent) {
dev_err(dev, "Missing parent device\n");
return -ENODEV;
}
irq = platform_get_irq_byname(pdev, "ehci-irq");
if (irq < 0) {
dev_err(dev, "EHCI irq failed\n");
return -ENODEV;
}
res = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "ehci");
if (!res) {
dev_err(dev, "UHH EHCI get resource failed\n");
return -ENODEV;
}
regs = ioremap(res->start, resource_size(res));
if (!regs) {
dev_err(dev, "UHH EHCI ioremap failed\n");
return -ENOMEM;
}
hcd = usb_create_hcd(&ehci_omap_hc_driver, dev,
dev_name(dev));
if (!hcd) {
dev_err(dev, "failed to create hcd with err %d\n", ret);
ret = -ENOMEM;
goto err_io;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
hcd->regs = regs;
/* get ehci regulator and enable */
for (i = 0; i < OMAP3_HS_USB_PORTS; i++) {
if (pdata->port_mode[i] != OMAP_EHCI_PORT_MODE_PHY) {
pdata->regulator[i] = NULL;
continue;
}
snprintf(supply, sizeof(supply), "hsusb%d", i);
pdata->regulator[i] = regulator_get(dev, supply);
if (IS_ERR(pdata->regulator[i])) {
pdata->regulator[i] = NULL;
dev_dbg(dev,
"failed to get ehci port%d regulator\n", i);
} else {
regulator_enable(pdata->regulator[i]);
}
}
ret = omap_usbhs_enable(dev);
if (ret) {
dev_err(dev, "failed to start usbhs with err %d\n", ret);
goto err_enable;
}
/*
* An undocumented "feature" in the OMAP3 EHCI controller causes
* suspended ports to be taken out of suspend when
* the USBCMD.Run/Stop bit is cleared (for example when
* we do ehci_bus_suspend).
* This breaks suspend-resume if the root-hub is allowed
* to suspend. Writing 1 to this undocumented register bit
* disables this feature and restores normal behavior.
*/
ehci_write(regs, EHCI_INSNREG04,
EHCI_INSNREG04_DISABLE_UNSUSPEND);
/* Soft reset the PHY using PHY reset command over ULPI */
if (pdata->port_mode[0] == OMAP_EHCI_PORT_MODE_PHY)
omap_ehci_soft_phy_reset(pdev, 0);
if (pdata->port_mode[1] == OMAP_EHCI_PORT_MODE_PHY)
omap_ehci_soft_phy_reset(pdev, 1);
omap_ehci = hcd_to_ehci(hcd);
omap_ehci->sbrn = 0x20;
/* we know this is the memory we want, no need to ioremap again */
omap_ehci->caps = hcd->regs;
omap_ehci->regs = hcd->regs
+ HC_LENGTH(ehci, readl(&omap_ehci->caps->hc_capbase));
dbg_hcs_params(omap_ehci, "reset");
dbg_hcc_params(omap_ehci, "reset");
/* cache this readonly data; minimize chip reads */
omap_ehci->hcs_params = readl(&omap_ehci->caps->hcs_params);
ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
if (ret) {
dev_err(dev, "failed to add hcd with err %d\n", ret);
goto err_add_hcd;
}
/* root ports should always stay powered */
ehci_port_power(omap_ehci, 1);
return 0;
err_add_hcd:
omap_usbhs_disable(dev);
err_enable:
disable_put_regulator(pdata);
usb_put_hcd(hcd);
err_io:
iounmap(regs);
return ret;
}
/**
* ehci_hcd_omap_remove - shutdown processing for EHCI HCDs
* @pdev: USB Host Controller being removed
*
* Reverses the effect of usb_ehci_hcd_omap_probe(), first invoking
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*/
static int ehci_hcd_omap_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct usb_hcd *hcd = dev_get_drvdata(dev);
usb_remove_hcd(hcd);
omap_usbhs_disable(dev);
disable_put_regulator(dev->platform_data);
iounmap(hcd->regs);
usb_put_hcd(hcd);
return 0;
}
static void ehci_hcd_omap_shutdown(struct platform_device *pdev)
{
struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev);
if (hcd->driver->shutdown)
hcd->driver->shutdown(hcd);
}
static struct platform_driver ehci_hcd_omap_driver = {
.probe = ehci_hcd_omap_probe,
.remove = ehci_hcd_omap_remove,
.shutdown = ehci_hcd_omap_shutdown,
/*.suspend = ehci_hcd_omap_suspend, */
/*.resume = ehci_hcd_omap_resume, */
.driver = {
.name = "ehci-omap",
}
};
/*-------------------------------------------------------------------------*/
static const struct hc_driver ehci_omap_hc_driver = {
.description = hcd_name,
.product_desc = "OMAP-EHCI Host Controller",
.hcd_priv_size = sizeof(struct ehci_hcd),
/*
* generic hardware linkage
*/
.irq = ehci_irq,
.flags = HCD_MEMORY | HCD_USB2,
/*
* basic lifecycle operations
*/
.reset = ehci_init,
.start = ehci_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
.endpoint_reset = ehci_endpoint_reset,
/*
* scheduling support
*/
.get_frame_number = ehci_get_frame,
/*
* root hub support
*/
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
MODULE_ALIAS("platform:omap-ehci");
MODULE_AUTHOR("Texas Instruments, Inc.");
MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>");
| gpl-2.0 |
RomanHargrave/pf-kernel | arch/powerpc/kvm/book3s_paired_singles.c | 644 | 31449 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright Novell Inc 2010
*
* Authors: Alexander Graf <agraf@suse.de>
*/
#include <asm/kvm.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_fpu.h>
#include <asm/reg.h>
#include <asm/cacheflush.h>
#include <asm/switch_to.h>
#include <linux/vmalloc.h>
/* #define DEBUG */
#ifdef DEBUG
#define dprintk printk
#else
#define dprintk(...) do { } while(0);
#endif
#define OP_LFS 48
#define OP_LFSU 49
#define OP_LFD 50
#define OP_LFDU 51
#define OP_STFS 52
#define OP_STFSU 53
#define OP_STFD 54
#define OP_STFDU 55
#define OP_PSQ_L 56
#define OP_PSQ_LU 57
#define OP_PSQ_ST 60
#define OP_PSQ_STU 61
#define OP_31_LFSX 535
#define OP_31_LFSUX 567
#define OP_31_LFDX 599
#define OP_31_LFDUX 631
#define OP_31_STFSX 663
#define OP_31_STFSUX 695
#define OP_31_STFX 727
#define OP_31_STFUX 759
#define OP_31_LWIZX 887
#define OP_31_STFIWX 983
#define OP_59_FADDS 21
#define OP_59_FSUBS 20
#define OP_59_FSQRTS 22
#define OP_59_FDIVS 18
#define OP_59_FRES 24
#define OP_59_FMULS 25
#define OP_59_FRSQRTES 26
#define OP_59_FMSUBS 28
#define OP_59_FMADDS 29
#define OP_59_FNMSUBS 30
#define OP_59_FNMADDS 31
#define OP_63_FCMPU 0
#define OP_63_FCPSGN 8
#define OP_63_FRSP 12
#define OP_63_FCTIW 14
#define OP_63_FCTIWZ 15
#define OP_63_FDIV 18
#define OP_63_FADD 21
#define OP_63_FSQRT 22
#define OP_63_FSEL 23
#define OP_63_FRE 24
#define OP_63_FMUL 25
#define OP_63_FRSQRTE 26
#define OP_63_FMSUB 28
#define OP_63_FMADD 29
#define OP_63_FNMSUB 30
#define OP_63_FNMADD 31
#define OP_63_FCMPO 32
#define OP_63_MTFSB1 38 // XXX
#define OP_63_FSUB 20
#define OP_63_FNEG 40
#define OP_63_MCRFS 64
#define OP_63_MTFSB0 70
#define OP_63_FMR 72
#define OP_63_MTFSFI 134
#define OP_63_FABS 264
#define OP_63_MFFS 583
#define OP_63_MTFSF 711
#define OP_4X_PS_CMPU0 0
#define OP_4X_PSQ_LX 6
#define OP_4XW_PSQ_STX 7
#define OP_4A_PS_SUM0 10
#define OP_4A_PS_SUM1 11
#define OP_4A_PS_MULS0 12
#define OP_4A_PS_MULS1 13
#define OP_4A_PS_MADDS0 14
#define OP_4A_PS_MADDS1 15
#define OP_4A_PS_DIV 18
#define OP_4A_PS_SUB 20
#define OP_4A_PS_ADD 21
#define OP_4A_PS_SEL 23
#define OP_4A_PS_RES 24
#define OP_4A_PS_MUL 25
#define OP_4A_PS_RSQRTE 26
#define OP_4A_PS_MSUB 28
#define OP_4A_PS_MADD 29
#define OP_4A_PS_NMSUB 30
#define OP_4A_PS_NMADD 31
#define OP_4X_PS_CMPO0 32
#define OP_4X_PSQ_LUX 38
#define OP_4XW_PSQ_STUX 39
#define OP_4X_PS_NEG 40
#define OP_4X_PS_CMPU1 64
#define OP_4X_PS_MR 72
#define OP_4X_PS_CMPO1 96
#define OP_4X_PS_NABS 136
#define OP_4X_PS_ABS 264
#define OP_4X_PS_MERGE00 528
#define OP_4X_PS_MERGE01 560
#define OP_4X_PS_MERGE10 592
#define OP_4X_PS_MERGE11 624
#define SCALAR_NONE 0
#define SCALAR_HIGH (1 << 0)
#define SCALAR_LOW (1 << 1)
#define SCALAR_NO_PS0 (1 << 2)
#define SCALAR_NO_PS1 (1 << 3)
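/*
 * How the SCALAR_* flags are consumed (see kvmppc_ps_two_in() below):
 * SCALAR_LOW substitutes the QPR (PS1) word of the second input on the
 * PS0 side, SCALAR_HIGH reuses the PS0 word of the second input on the
 * PS1 side, and SCALAR_NO_PS0/SCALAR_NO_PS1 suppress the writeback of
 * the respective half. ps_muls0, for example, passes SCALAR_HIGH while
 * ps_muls1 passes SCALAR_LOW.
 */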
#define GQR_ST_TYPE_MASK 0x00000007
#define GQR_ST_TYPE_SHIFT 0
#define GQR_ST_SCALE_MASK 0x00003f00
#define GQR_ST_SCALE_SHIFT 8
#define GQR_LD_TYPE_MASK 0x00070000
#define GQR_LD_TYPE_SHIFT 16
#define GQR_LD_SCALE_MASK 0x3f000000
#define GQR_LD_SCALE_SHIFT 24
#define GQR_QUANTIZE_FLOAT 0
#define GQR_QUANTIZE_U8 4
#define GQR_QUANTIZE_U16 5
#define GQR_QUANTIZE_S8 6
#define GQR_QUANTIZE_S16 7
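/*
 * Worked example (illustrative): for a GQR value of 0x00070004 the
 * load type is (gqr & GQR_LD_TYPE_MASK) >> GQR_LD_TYPE_SHIFT = 7
 * (GQR_QUANTIZE_S16), the store type is
 * (gqr & GQR_ST_TYPE_MASK) >> GQR_ST_TYPE_SHIFT = 4 (GQR_QUANTIZE_U8),
 * and both scale fields are zero.
 */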
#define FPU_LS_SINGLE 0
#define FPU_LS_DOUBLE 1
#define FPU_LS_SINGLE_LOW 2
static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
{
kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]);
}
static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
{
u32 dsisr;
u64 msr = kvmppc_get_msr(vcpu);
msr = kvmppc_set_field(msr, 33, 36, 0);
msr = kvmppc_set_field(msr, 42, 47, 0);
kvmppc_set_msr(vcpu, msr);
kvmppc_set_dar(vcpu, eaddr);
/* Page Fault */
dsisr = kvmppc_set_field(0, 33, 33, 1);
if (is_store)
dsisr = kvmppc_set_field(dsisr, 38, 38, 1);
kvmppc_set_dsisr(vcpu, dsisr);
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}
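/*
 * Note on the numbering used above: kvmppc_set_field() takes IBM bit
 * positions within a 64-bit word, so for the 32-bit DSISR value bit 33
 * corresponds to mask 0x40000000 (translation miss) and bit 38 to
 * 0x02000000 (store access).
 */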
static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
int rs, ulong addr, int ls_type)
{
int emulated = EMULATE_FAIL;
int r;
char tmp[8];
int len = sizeof(u32);
if (ls_type == FPU_LS_DOUBLE)
len = sizeof(u64);
/* read from memory */
r = kvmppc_ld(vcpu, &addr, len, tmp, true);
vcpu->arch.paddr_accessed = addr;
if (r < 0) {
kvmppc_inject_pf(vcpu, addr, false);
goto done_load;
} else if (r == EMULATE_DO_MMIO) {
emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
len, 1);
goto done_load;
}
emulated = EMULATE_DONE;
/* put in registers */
switch (ls_type) {
case FPU_LS_SINGLE:
kvm_cvt_fd((u32*)tmp, &VCPU_FPR(vcpu, rs));
vcpu->arch.qpr[rs] = *((u32*)tmp);
break;
case FPU_LS_DOUBLE:
VCPU_FPR(vcpu, rs) = *((u64*)tmp);
break;
}
dprintk(KERN_INFO "KVM: FPR_LD [0x%llx] at 0x%lx (%d)\n", *(u64*)tmp,
addr, len);
done_load:
return emulated;
}
static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
int rs, ulong addr, int ls_type)
{
int emulated = EMULATE_FAIL;
int r;
char tmp[8];
u64 val;
int len;
switch (ls_type) {
case FPU_LS_SINGLE:
kvm_cvt_df(&VCPU_FPR(vcpu, rs), (u32*)tmp);
val = *((u32*)tmp);
len = sizeof(u32);
break;
case FPU_LS_SINGLE_LOW:
*((u32*)tmp) = VCPU_FPR(vcpu, rs);
val = VCPU_FPR(vcpu, rs) & 0xffffffff;
len = sizeof(u32);
break;
case FPU_LS_DOUBLE:
*((u64*)tmp) = VCPU_FPR(vcpu, rs);
val = VCPU_FPR(vcpu, rs);
len = sizeof(u64);
break;
default:
val = 0;
len = 0;
}
r = kvmppc_st(vcpu, &addr, len, tmp, true);
vcpu->arch.paddr_accessed = addr;
if (r < 0) {
kvmppc_inject_pf(vcpu, addr, true);
} else if (r == EMULATE_DO_MMIO) {
emulated = kvmppc_handle_store(run, vcpu, val, len, 1);
} else {
emulated = EMULATE_DONE;
}
dprintk(KERN_INFO "KVM: FPR_ST [0x%llx] at 0x%lx (%d)\n",
val, addr, len);
return emulated;
}
static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
int rs, ulong addr, bool w, int i)
{
int emulated = EMULATE_FAIL;
int r;
float one = 1.0;
u32 tmp[2];
/* read from memory */
if (w) {
r = kvmppc_ld(vcpu, &addr, sizeof(u32), tmp, true);
memcpy(&tmp[1], &one, sizeof(u32));
} else {
r = kvmppc_ld(vcpu, &addr, sizeof(u32) * 2, tmp, true);
}
vcpu->arch.paddr_accessed = addr;
if (r < 0) {
kvmppc_inject_pf(vcpu, addr, false);
goto done_load;
} else if ((r == EMULATE_DO_MMIO) && w) {
emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
4, 1);
vcpu->arch.qpr[rs] = tmp[1];
goto done_load;
} else if (r == EMULATE_DO_MMIO) {
emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FQPR | rs,
8, 1);
goto done_load;
}
emulated = EMULATE_DONE;
/* put in registers */
kvm_cvt_fd(&tmp[0], &VCPU_FPR(vcpu, rs));
vcpu->arch.qpr[rs] = tmp[1];
dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
tmp[1], addr, w ? 4 : 8);
done_load:
return emulated;
}
static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
int rs, ulong addr, bool w, int i)
{
int emulated = EMULATE_FAIL;
int r;
u32 tmp[2];
int len = w ? sizeof(u32) : sizeof(u64);
kvm_cvt_df(&VCPU_FPR(vcpu, rs), &tmp[0]);
tmp[1] = vcpu->arch.qpr[rs];
r = kvmppc_st(vcpu, &addr, len, tmp, true);
vcpu->arch.paddr_accessed = addr;
if (r < 0) {
kvmppc_inject_pf(vcpu, addr, true);
} else if ((r == EMULATE_DO_MMIO) && w) {
emulated = kvmppc_handle_store(run, vcpu, tmp[0], 4, 1);
} else if (r == EMULATE_DO_MMIO) {
u64 val = ((u64)tmp[0] << 32) | tmp[1];
emulated = kvmppc_handle_store(run, vcpu, val, 8, 1);
} else {
emulated = EMULATE_DONE;
}
dprintk(KERN_INFO "KVM: PSQ_ST [0x%x, 0x%x] at 0x%lx (%d)\n",
tmp[0], tmp[1], addr, len);
return emulated;
}
/*
* Extracts inst bits using the ordering defined by the spec, i.e. the
* leftmost bit is numbered zero. Both boundary bits are included.
*/
static inline u32 inst_get_field(u32 inst, int msb, int lsb)
{
return kvmppc_get_field(inst, msb + 32, lsb + 32);
}
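/*
 * Worked example (illustrative): with IBM bit numbering, bit 0 is the
 * most significant bit of the 32-bit instruction, so
 * inst_get_field(inst, 26, 30) extracts the five bits that end one bit
 * above the LSB, i.e. it is equivalent to (inst >> 1) & 0x1f.
 */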
bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst)
{
if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
return false;
switch (get_op(inst)) {
case OP_PSQ_L:
case OP_PSQ_LU:
case OP_PSQ_ST:
case OP_PSQ_STU:
case OP_LFS:
case OP_LFSU:
case OP_LFD:
case OP_LFDU:
case OP_STFS:
case OP_STFSU:
case OP_STFD:
case OP_STFDU:
return true;
case 4:
/* X form */
switch (inst_get_field(inst, 21, 30)) {
case OP_4X_PS_CMPU0:
case OP_4X_PSQ_LX:
case OP_4X_PS_CMPO0:
case OP_4X_PSQ_LUX:
case OP_4X_PS_NEG:
case OP_4X_PS_CMPU1:
case OP_4X_PS_MR:
case OP_4X_PS_CMPO1:
case OP_4X_PS_NABS:
case OP_4X_PS_ABS:
case OP_4X_PS_MERGE00:
case OP_4X_PS_MERGE01:
case OP_4X_PS_MERGE10:
case OP_4X_PS_MERGE11:
return true;
}
/* XW form */
switch (inst_get_field(inst, 25, 30)) {
case OP_4XW_PSQ_STX:
case OP_4XW_PSQ_STUX:
return true;
}
/* A form */
switch (inst_get_field(inst, 26, 30)) {
case OP_4A_PS_SUM1:
case OP_4A_PS_SUM0:
case OP_4A_PS_MULS0:
case OP_4A_PS_MULS1:
case OP_4A_PS_MADDS0:
case OP_4A_PS_MADDS1:
case OP_4A_PS_DIV:
case OP_4A_PS_SUB:
case OP_4A_PS_ADD:
case OP_4A_PS_SEL:
case OP_4A_PS_RES:
case OP_4A_PS_MUL:
case OP_4A_PS_RSQRTE:
case OP_4A_PS_MSUB:
case OP_4A_PS_MADD:
case OP_4A_PS_NMSUB:
case OP_4A_PS_NMADD:
return true;
}
break;
case 59:
switch (inst_get_field(inst, 21, 30)) {
case OP_59_FADDS:
case OP_59_FSUBS:
case OP_59_FDIVS:
case OP_59_FRES:
case OP_59_FRSQRTES:
return true;
}
switch (inst_get_field(inst, 26, 30)) {
case OP_59_FMULS:
case OP_59_FMSUBS:
case OP_59_FMADDS:
case OP_59_FNMSUBS:
case OP_59_FNMADDS:
return true;
}
break;
case 63:
switch (inst_get_field(inst, 21, 30)) {
case OP_63_MTFSB0:
case OP_63_MTFSB1:
case OP_63_MTFSF:
case OP_63_MTFSFI:
case OP_63_MCRFS:
case OP_63_MFFS:
case OP_63_FCMPU:
case OP_63_FCMPO:
case OP_63_FNEG:
case OP_63_FMR:
case OP_63_FABS:
case OP_63_FRSP:
case OP_63_FDIV:
case OP_63_FADD:
case OP_63_FSUB:
case OP_63_FCTIW:
case OP_63_FCTIWZ:
case OP_63_FRSQRTE:
case OP_63_FCPSGN:
return true;
}
switch (inst_get_field(inst, 26, 30)) {
case OP_63_FMUL:
case OP_63_FSEL:
case OP_63_FMSUB:
case OP_63_FMADD:
case OP_63_FNMSUB:
case OP_63_FNMADD:
return true;
}
break;
case 31:
switch (inst_get_field(inst, 21, 30)) {
case OP_31_LFSX:
case OP_31_LFSUX:
case OP_31_LFDX:
case OP_31_LFDUX:
case OP_31_STFSX:
case OP_31_STFSUX:
case OP_31_STFX:
case OP_31_STFUX:
case OP_31_STFIWX:
return true;
}
break;
}
return false;
}
static int get_d_signext(u32 inst)
{
int d = inst & 0x8ff;
if (d & 0x800)
return -(d & 0x7ff);
return (d & 0x7ff);
}
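/*
 * Worked example of the behaviour above (describing the code as
 * written): a displacement field of 0x001 yields +1 while 0x801 yields
 * -1, i.e. bit 0x800 acts as a sign-magnitude sign bit.
 */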
static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
int reg_out, int reg_in1, int reg_in2,
int reg_in3, int scalar,
void (*func)(u64 *fpscr,
u32 *dst, u32 *src1,
u32 *src2, u32 *src3))
{
u32 *qpr = vcpu->arch.qpr;
u32 ps0_out;
u32 ps0_in1, ps0_in2, ps0_in3;
u32 ps1_in1, ps1_in2, ps1_in3;
/* RC */
WARN_ON(rc);
/* PS0 */
kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);
kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);
kvm_cvt_df(&VCPU_FPR(vcpu, reg_in3), &ps0_in3);
if (scalar & SCALAR_LOW)
ps0_in2 = qpr[reg_in2];
func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
ps0_in1, ps0_in2, ps0_in3, ps0_out);
if (!(scalar & SCALAR_NO_PS0))
kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
/* PS1 */
ps1_in1 = qpr[reg_in1];
ps1_in2 = qpr[reg_in2];
ps1_in3 = qpr[reg_in3];
if (scalar & SCALAR_HIGH)
ps1_in2 = ps0_in2;
if (!(scalar & SCALAR_NO_PS1))
func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]);
return EMULATE_DONE;
}
static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
int reg_out, int reg_in1, int reg_in2,
int scalar,
void (*func)(u64 *fpscr,
u32 *dst, u32 *src1,
u32 *src2))
{
u32 *qpr = vcpu->arch.qpr;
u32 ps0_out;
u32 ps0_in1, ps0_in2;
u32 ps1_out;
u32 ps1_in1, ps1_in2;
/* RC */
WARN_ON(rc);
/* PS0 */
kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);
if (scalar & SCALAR_LOW)
ps0_in2 = qpr[reg_in2];
else
kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);
func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2);
if (!(scalar & SCALAR_NO_PS0)) {
dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
ps0_in1, ps0_in2, ps0_out);
kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
}
/* PS1 */
ps1_in1 = qpr[reg_in1];
ps1_in2 = qpr[reg_in2];
if (scalar & SCALAR_HIGH)
ps1_in2 = ps0_in2;
func(&vcpu->arch.fp.fpscr, &ps1_out, &ps1_in1, &ps1_in2);
if (!(scalar & SCALAR_NO_PS1)) {
qpr[reg_out] = ps1_out;
dprintk(KERN_INFO "PS2 ps1 -> f(0x%x, 0x%x) = 0x%x\n",
ps1_in1, ps1_in2, qpr[reg_out]);
}
return EMULATE_DONE;
}
static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
int reg_out, int reg_in,
void (*func)(u64 *t,
u32 *dst, u32 *src1))
{
u32 *qpr = vcpu->arch.qpr;
u32 ps0_out, ps0_in;
u32 ps1_in;
/* RC */
WARN_ON(rc);
/* PS0 */
kvm_cvt_df(&VCPU_FPR(vcpu, reg_in), &ps0_in);
func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in);
dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
ps0_in, ps0_out);
kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
/* PS1 */
ps1_in = qpr[reg_in];
func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in);
dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n",
ps1_in, qpr[reg_out]);
return EMULATE_DONE;
}
int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
u32 inst;
enum emulation_result emulated = EMULATE_DONE;
int ax_rd, ax_ra, ax_rb, ax_rc;
short full_d;
u64 *fpr_d, *fpr_a, *fpr_b, *fpr_c;
bool rcomp;
u32 cr;
#ifdef DEBUG
int i;
#endif
emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
if (emulated != EMULATE_DONE)
return emulated;
ax_rd = inst_get_field(inst, 6, 10);
ax_ra = inst_get_field(inst, 11, 15);
ax_rb = inst_get_field(inst, 16, 20);
ax_rc = inst_get_field(inst, 21, 25);
full_d = inst_get_field(inst, 16, 31);
fpr_d = &VCPU_FPR(vcpu, ax_rd);
fpr_a = &VCPU_FPR(vcpu, ax_ra);
fpr_b = &VCPU_FPR(vcpu, ax_rb);
fpr_c = &VCPU_FPR(vcpu, ax_rc);
rcomp = (inst & 1) ? true : false;
cr = kvmppc_get_cr(vcpu);
if (!kvmppc_inst_is_paired_single(vcpu, inst))
return EMULATE_FAIL;
if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
return EMULATE_AGAIN;
}
kvmppc_giveup_ext(vcpu, MSR_FP);
preempt_disable();
enable_kernel_fp();
/* Do we need to clear FE0 / FE1 here? Don't think so. */
#ifdef DEBUG
for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
u32 f;
kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n",
i, f, VCPU_FPR(vcpu, i), i, vcpu->arch.qpr[i]);
}
#endif
switch (get_op(inst)) {
case OP_PSQ_L:
{
ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
bool w = inst_get_field(inst, 16, 16) ? true : false;
int i = inst_get_field(inst, 17, 19);
addr += get_d_signext(inst);
emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
break;
}
case OP_PSQ_LU:
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
bool w = inst_get_field(inst, 16, 16) ? true : false;
int i = inst_get_field(inst, 17, 19);
addr += get_d_signext(inst);
emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
break;
}
case OP_PSQ_ST:
{
ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
bool w = inst_get_field(inst, 16, 16) ? true : false;
int i = inst_get_field(inst, 17, 19);
addr += get_d_signext(inst);
emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
break;
}
case OP_PSQ_STU:
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
bool w = inst_get_field(inst, 16, 16) ? true : false;
int i = inst_get_field(inst, 17, 19);
addr += get_d_signext(inst);
emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
break;
}
case 4:
/* X form */
switch (inst_get_field(inst, 21, 30)) {
case OP_4X_PS_CMPU0:
/* XXX */
emulated = EMULATE_FAIL;
break;
case OP_4X_PSQ_LX:
{
ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
bool w = inst_get_field(inst, 21, 21) ? true : false;
int i = inst_get_field(inst, 22, 24);
addr += kvmppc_get_gpr(vcpu, ax_rb);
emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
break;
}
case OP_4X_PS_CMPO0:
/* XXX */
emulated = EMULATE_FAIL;
break;
case OP_4X_PSQ_LUX:
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
bool w = inst_get_field(inst, 21, 21) ? true : false;
int i = inst_get_field(inst, 22, 24);
addr += kvmppc_get_gpr(vcpu, ax_rb);
emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
break;
}
case OP_4X_PS_NEG:
VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
VCPU_FPR(vcpu, ax_rd) ^= 0x8000000000000000ULL;
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
vcpu->arch.qpr[ax_rd] ^= 0x80000000;
break;
case OP_4X_PS_CMPU1:
/* XXX */
emulated = EMULATE_FAIL;
break;
case OP_4X_PS_MR:
WARN_ON(rcomp);
VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
break;
case OP_4X_PS_CMPO1:
/* XXX */
emulated = EMULATE_FAIL;
break;
case OP_4X_PS_NABS:
WARN_ON(rcomp);
VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
VCPU_FPR(vcpu, ax_rd) |= 0x8000000000000000ULL;
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
vcpu->arch.qpr[ax_rd] |= 0x80000000;
break;
case OP_4X_PS_ABS:
WARN_ON(rcomp);
VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
VCPU_FPR(vcpu, ax_rd) &= ~0x8000000000000000ULL;
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
vcpu->arch.qpr[ax_rd] &= ~0x80000000;
break;
case OP_4X_PS_MERGE00:
WARN_ON(rcomp);
VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
/* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
&vcpu->arch.qpr[ax_rd]);
break;
case OP_4X_PS_MERGE01:
WARN_ON(rcomp);
VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
break;
case OP_4X_PS_MERGE10:
WARN_ON(rcomp);
/* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
&VCPU_FPR(vcpu, ax_rd));
/* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
&vcpu->arch.qpr[ax_rd]);
break;
case OP_4X_PS_MERGE11:
WARN_ON(rcomp);
/* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
&VCPU_FPR(vcpu, ax_rd));
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
break;
}
/* XW form */
switch (inst_get_field(inst, 25, 30)) {
case OP_4XW_PSQ_STX:
{
ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
bool w = inst_get_field(inst, 21, 21) ? true : false;
int i = inst_get_field(inst, 22, 24);
addr += kvmppc_get_gpr(vcpu, ax_rb);
emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
break;
}
case OP_4XW_PSQ_STUX:
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
bool w = inst_get_field(inst, 21, 21) ? true : false;
int i = inst_get_field(inst, 22, 24);
addr += kvmppc_get_gpr(vcpu, ax_rb);
emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
break;
}
}
/* A form */
switch (inst_get_field(inst, 26, 30)) {
case OP_4A_PS_SUM1:
emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds);
VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rc);
break;
case OP_4A_PS_SUM0:
emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
ax_ra, ax_rb, SCALAR_NO_PS1 | SCALAR_LOW, fps_fadds);
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rc];
break;
case OP_4A_PS_MULS0:
emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
ax_ra, ax_rc, SCALAR_HIGH, fps_fmuls);
break;
case OP_4A_PS_MULS1:
emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
ax_ra, ax_rc, SCALAR_LOW, fps_fmuls);
break;
case OP_4A_PS_MADDS0:
emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
ax_ra, ax_rc, ax_rb, SCALAR_HIGH, fps_fmadds);
break;
case OP_4A_PS_MADDS1:
emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
ax_ra, ax_rc, ax_rb, SCALAR_LOW, fps_fmadds);
break;
case OP_4A_PS_DIV:
emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
ax_ra, ax_rb, SCALAR_NONE, fps_fdivs);
break;
case OP_4A_PS_SUB:
emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
ax_ra, ax_rb, SCALAR_NONE, fps_fsubs);
break;
case OP_4A_PS_ADD:
emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
ax_ra, ax_rb, SCALAR_NONE, fps_fadds);
break;
case OP_4A_PS_SEL:
emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fsel);
break;
case OP_4A_PS_RES:
emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
ax_rb, fps_fres);
break;
case OP_4A_PS_MUL:
emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
ax_ra, ax_rc, SCALAR_NONE, fps_fmuls);
break;
case OP_4A_PS_RSQRTE:
emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
ax_rb, fps_frsqrte);
break;
case OP_4A_PS_MSUB:
emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmsubs);
break;
case OP_4A_PS_MADD:
emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmadds);
break;
case OP_4A_PS_NMSUB:
emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmsubs);
break;
case OP_4A_PS_NMADD:
emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmadds);
break;
}
break;
/* Real FPU operations */
case OP_LFS:
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
FPU_LS_SINGLE);
break;
}
case OP_LFSU:
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
FPU_LS_SINGLE);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
break;
}
case OP_LFD:
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
FPU_LS_DOUBLE);
break;
}
case OP_LFDU:
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
FPU_LS_DOUBLE);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
break;
}
case OP_STFS:
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
FPU_LS_SINGLE);
break;
}
case OP_STFSU:
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
FPU_LS_SINGLE);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
break;
}
case OP_STFD:
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
FPU_LS_DOUBLE);
break;
}
case OP_STFDU:
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
FPU_LS_DOUBLE);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
break;
}
case 31:
switch (inst_get_field(inst, 21, 30)) {
case OP_31_LFSX:
{
ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
addr += kvmppc_get_gpr(vcpu, ax_rb);
emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
addr, FPU_LS_SINGLE);
break;
}
case OP_31_LFSUX:
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
kvmppc_get_gpr(vcpu, ax_rb);
emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
addr, FPU_LS_SINGLE);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
break;
}
case OP_31_LFDX:
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
kvmppc_get_gpr(vcpu, ax_rb);
emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
addr, FPU_LS_DOUBLE);
break;
}
case OP_31_LFDUX:
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
kvmppc_get_gpr(vcpu, ax_rb);
emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
addr, FPU_LS_DOUBLE);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
break;
}
case OP_31_STFSX:
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
kvmppc_get_gpr(vcpu, ax_rb);
emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
addr, FPU_LS_SINGLE);
break;
}
case OP_31_STFSUX:
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
kvmppc_get_gpr(vcpu, ax_rb);
emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
addr, FPU_LS_SINGLE);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
break;
}
case OP_31_STFX:
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
kvmppc_get_gpr(vcpu, ax_rb);
emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
addr, FPU_LS_DOUBLE);
break;
}
case OP_31_STFUX:
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
kvmppc_get_gpr(vcpu, ax_rb);
emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
addr, FPU_LS_DOUBLE);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
break;
}
case OP_31_STFIWX:
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
kvmppc_get_gpr(vcpu, ax_rb);
emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
addr,
FPU_LS_SINGLE_LOW);
break;
}
break;
}
break;
case 59:
switch (inst_get_field(inst, 21, 30)) {
case OP_59_FADDS:
fpd_fadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
case OP_59_FSUBS:
fpd_fsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
case OP_59_FDIVS:
fpd_fdivs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
case OP_59_FRES:
fpd_fres(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
case OP_59_FRSQRTES:
fpd_frsqrtes(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
}
switch (inst_get_field(inst, 26, 30)) {
case OP_59_FMULS:
fpd_fmuls(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
case OP_59_FMSUBS:
fpd_fmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
case OP_59_FMADDS:
fpd_fmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
case OP_59_FNMSUBS:
fpd_fnmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
case OP_59_FNMADDS:
fpd_fnmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
}
break;
case 63:
switch (inst_get_field(inst, 21, 30)) {
case OP_63_MTFSB0:
case OP_63_MTFSB1:
case OP_63_MCRFS:
case OP_63_MTFSFI:
/* XXX need to implement */
break;
case OP_63_MFFS:
/* XXX missing CR */
*fpr_d = vcpu->arch.fp.fpscr;
break;
case OP_63_MTFSF:
/* XXX missing fm bits */
/* XXX missing CR */
vcpu->arch.fp.fpscr = *fpr_b;
break;
case OP_63_FCMPU:
{
u32 tmp_cr;
u32 cr0_mask = 0xf0000000;
u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
fpd_fcmpu(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
cr &= ~(cr0_mask >> cr_shift);
cr |= (tmp_cr & cr0_mask) >> cr_shift;
break;
}
case OP_63_FCMPO:
{
u32 tmp_cr;
u32 cr0_mask = 0xf0000000;
u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
fpd_fcmpo(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
cr &= ~(cr0_mask >> cr_shift);
cr |= (tmp_cr & cr0_mask) >> cr_shift;
break;
}
case OP_63_FNEG:
fpd_fneg(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
break;
case OP_63_FMR:
*fpr_d = *fpr_b;
break;
case OP_63_FABS:
fpd_fabs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
break;
case OP_63_FCPSGN:
fpd_fcpsgn(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
break;
case OP_63_FDIV:
fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
break;
case OP_63_FADD:
fpd_fadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
break;
case OP_63_FSUB:
fpd_fsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
break;
case OP_63_FCTIW:
fpd_fctiw(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
break;
case OP_63_FCTIWZ:
fpd_fctiwz(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
break;
case OP_63_FRSP:
fpd_frsp(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
case OP_63_FRSQRTE:
{
double one = 1.0f;
/* fD = sqrt(fB) */
fpd_fsqrt(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
/* fD = 1.0f / fD */
fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
break;
}
}
switch (inst_get_field(inst, 26, 30)) {
case OP_63_FMUL:
fpd_fmul(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
break;
case OP_63_FSEL:
fpd_fsel(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
break;
case OP_63_FMSUB:
fpd_fmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
break;
case OP_63_FMADD:
fpd_fmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
break;
case OP_63_FNMSUB:
fpd_fnmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
break;
case OP_63_FNMADD:
fpd_fnmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
break;
}
break;
}
#ifdef DEBUG
for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
u32 f;
kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
}
#endif
if (rcomp)
kvmppc_set_cr(vcpu, cr);
preempt_enable();
return emulated;
}
| gpl-2.0 |
andrewoko-odion/linux | drivers/crypto/nx/nx-aes-ctr.c | 900 | 4646 | /**
* AES CTR routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 only.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
#include "nx.h"
static int ctr_aes_nx_set_key(struct crypto_tfm *tfm,
const u8 *in_key,
unsigned int key_len)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
switch (key_len) {
case AES_KEYSIZE_128:
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
break;
case AES_KEYSIZE_192:
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
break;
case AES_KEYSIZE_256:
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
break;
default:
return -EINVAL;
}
csbcpb->cpb.hdr.mode = NX_MODE_AES_CTR;
memcpy(csbcpb->cpb.aes_ctr.key, in_key, key_len);
return 0;
}
static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
const u8 *in_key,
unsigned int key_len)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
if (key_len < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
memcpy(nx_ctx->priv.ctr.nonce,
in_key + key_len - CTR_RFC3686_NONCE_SIZE,
CTR_RFC3686_NONCE_SIZE);
key_len -= CTR_RFC3686_NONCE_SIZE;
return ctr_aes_nx_set_key(tfm, in_key, key_len);
}
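/*
 * Per RFC 3686 the key blob is the AES key followed by a 4-byte nonce;
 * a 20-byte blob, for example, splits into a 16-byte AES-128 key
 * (bytes 0..15) and the nonce (bytes 16..19), which is what the
 * handler above peels off before calling ctr_aes_nx_set_key().
 */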
static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
int rc;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
do {
to_process = nbytes - processed;
rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
processed, csbcpb->cpb.aes_ctr.iv);
if (rc)
goto out;
if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
atomic_inc(&(nx_ctx->stats->aes_ops));
atomic64_add(csbcpb->csb.processed_byte_count,
&(nx_ctx->stats->aes_bytes));
processed += to_process;
} while (processed < nbytes);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
u8 iv[16];
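/*
 * Layout of the RFC 3686 counter block assembled below:
 * 4-byte nonce || 8-byte per-request IV || 4-byte big-endian block
 * counter initialised to 1.
 */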
memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
memcpy(iv + CTR_RFC3686_NONCE_SIZE,
desc->info, CTR_RFC3686_IV_SIZE);
iv[12] = iv[13] = iv[14] = 0;
iv[15] = 1;
desc->info = iv;
return ctr_aes_nx_crypt(desc, dst, src, nbytes);
}
struct crypto_alg nx_ctr3686_aes_alg = {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "rfc3686-ctr-aes-nx",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = nx_crypto_ctx_aes_ctr_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
.ivsize = CTR_RFC3686_IV_SIZE,
.geniv = "seqiv",
.setkey = ctr3686_aes_nx_set_key,
.encrypt = ctr3686_aes_nx_crypt,
.decrypt = ctr3686_aes_nx_crypt,
}
};
| gpl-2.0 |
iConsole/Console-OS_kernel_common | drivers/video/backlight/tdo24m.c | 1156 | 10930 | /*
* tdo24m - SPI-based drivers for Toppoly TDO24M series LCD panels
*
* Copyright (C) 2008 Marvell International Ltd.
* Eric Miao <eric.miao@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/spi/tdo24m.h>
#include <linux/fb.h>
#include <linux/lcd.h>
#include <linux/slab.h>
#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL)
#define TDO24M_SPI_BUFF_SIZE (4)
#define MODE_QVGA 0
#define MODE_VGA 1
struct tdo24m {
struct spi_device *spi_dev;
struct lcd_device *lcd_dev;
struct spi_message msg;
struct spi_transfer xfer;
uint8_t *buf;
int (*adj_mode)(struct tdo24m *lcd, int mode);
int color_invert;
int power;
int mode;
};
/* use bit 30, 31 as the indicator of command parameter number */
#define CMD0(x) ((0 << 30) | (x))
#define CMD1(x, x1) ((1 << 30) | ((x) << 9) | 0x100 | (x1))
#define CMD2(x, x1, x2) ((2 << 30) | ((x) << 18) | 0x20000 |\
((x1) << 9) | 0x100 | (x2))
#define CMD_NULL (-1)
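/*
 * Worked example (illustrative): CMD1(0xB0, 0x16) packs to
 * (1 << 30) | (0xB0 << 9) | 0x100 | 0x16. tdo24m_writes() shifts the
 * value left by (7 - nparams), dropping the nparams tag bits, and sends
 * the payload MSB-first as 9-bit words, with the 0x100 bit marking a
 * parameter byte.
 */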
static const uint32_t lcd_panel_reset[] = {
CMD0(0x1), /* reset */
CMD0(0x0), /* nop */
CMD0(0x0), /* nop */
CMD0(0x0), /* nop */
CMD_NULL,
};
static const uint32_t lcd_panel_on[] = {
CMD0(0x29), /* Display ON */
CMD2(0xB8, 0xFF, 0xF9), /* Output Control */
CMD0(0x11), /* Sleep out */
CMD1(0xB0, 0x16), /* Wake */
CMD_NULL,
};
static const uint32_t lcd_panel_off[] = {
CMD0(0x28), /* Display OFF */
CMD2(0xB8, 0x80, 0x02), /* Output Control */
CMD0(0x10), /* Sleep in */
CMD1(0xB0, 0x00), /* Deep stand by in */
CMD_NULL,
};
static const uint32_t lcd_vga_pass_through_tdo24m[] = {
CMD1(0xB0, 0x16),
CMD1(0xBC, 0x80),
CMD1(0xE1, 0x00),
CMD1(0x36, 0x50),
CMD1(0x3B, 0x00),
CMD_NULL,
};
static const uint32_t lcd_qvga_pass_through_tdo24m[] = {
CMD1(0xB0, 0x16),
CMD1(0xBC, 0x81),
CMD1(0xE1, 0x00),
CMD1(0x36, 0x50),
CMD1(0x3B, 0x22),
CMD_NULL,
};
static const uint32_t lcd_vga_transfer_tdo24m[] = {
CMD1(0xcf, 0x02), /* Blanking period control (1) */
CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */
CMD1(0xd1, 0x01), /* CKV timing control on/off */
CMD2(0xd2, 0x14, 0x00), /* CKV 1,2 timing control */
CMD2(0xd3, 0x1a, 0x0f), /* OEV timing control */
CMD2(0xd4, 0x1f, 0xaf), /* ASW timing control (1) */
CMD1(0xd5, 0x14), /* ASW timing control (2) */
CMD0(0x21), /* Invert for normally black display */
CMD0(0x29), /* Display on */
CMD_NULL,
};
static const uint32_t lcd_qvga_transfer[] = {
CMD1(0xd6, 0x02), /* Blanking period control (1) */
CMD2(0xd7, 0x08, 0x04), /* Blanking period control (2) */
CMD1(0xd8, 0x01), /* CKV timing control on/off */
CMD2(0xd9, 0x00, 0x08), /* CKV 1,2 timing control */
CMD2(0xde, 0x05, 0x0a), /* OEV timing control */
CMD2(0xdf, 0x0a, 0x19), /* ASW timing control (1) */
CMD1(0xe0, 0x0a), /* ASW timing control (2) */
CMD0(0x21), /* Invert for normally black display */
CMD0(0x29), /* Display on */
CMD_NULL,
};
static const uint32_t lcd_vga_pass_through_tdo35s[] = {
CMD1(0xB0, 0x16),
CMD1(0xBC, 0x80),
CMD1(0xE1, 0x00),
CMD1(0x3B, 0x00),
CMD_NULL,
};
static const uint32_t lcd_qvga_pass_through_tdo35s[] = {
CMD1(0xB0, 0x16),
CMD1(0xBC, 0x81),
CMD1(0xE1, 0x00),
CMD1(0x3B, 0x22),
CMD_NULL,
};
static const uint32_t lcd_vga_transfer_tdo35s[] = {
CMD1(0xcf, 0x02), /* Blanking period control (1) */
CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */
CMD1(0xd1, 0x01), /* CKV timing control on/off */
CMD2(0xd2, 0x00, 0x1e), /* CKV 1,2 timing control */
CMD2(0xd3, 0x14, 0x28), /* OEV timing control */
CMD2(0xd4, 0x28, 0x64), /* ASW timing control (1) */
CMD1(0xd5, 0x28), /* ASW timing control (2) */
CMD0(0x21), /* Invert for normally black display */
CMD0(0x29), /* Display on */
CMD_NULL,
};
static const uint32_t lcd_panel_config[] = {
CMD2(0xb8, 0xff, 0xf9), /* Output control */
CMD0(0x11), /* sleep out */
CMD1(0xba, 0x01), /* Display mode (1) */
CMD1(0xbb, 0x00), /* Display mode (2) */
CMD1(0x3a, 0x60), /* Display mode 18-bit RGB */
CMD1(0xbf, 0x10), /* Drive system change control */
CMD1(0xb1, 0x56), /* Booster operation setup */
CMD1(0xb2, 0x33), /* Booster mode setup */
CMD1(0xb3, 0x11), /* Booster frequency setup */
CMD1(0xb4, 0x02), /* Op amp/system clock */
CMD1(0xb5, 0x35), /* VCS voltage */
CMD1(0xb6, 0x40), /* VCOM voltage */
CMD1(0xb7, 0x03), /* External display signal */
CMD1(0xbd, 0x00), /* ASW slew rate */
CMD1(0xbe, 0x00), /* Dummy data for QuadData operation */
CMD1(0xc0, 0x11), /* Sleep out FR count (A) */
CMD1(0xc1, 0x11), /* Sleep out FR count (B) */
CMD1(0xc2, 0x11), /* Sleep out FR count (C) */
CMD2(0xc3, 0x20, 0x40), /* Sleep out FR count (D) */
CMD2(0xc4, 0x60, 0xc0), /* Sleep out FR count (E) */
CMD2(0xc5, 0x10, 0x20), /* Sleep out FR count (F) */
CMD1(0xc6, 0xc0), /* Sleep out FR count (G) */
CMD2(0xc7, 0x33, 0x43), /* Gamma 1 fine tuning (1) */
CMD1(0xc8, 0x44), /* Gamma 1 fine tuning (2) */
CMD1(0xc9, 0x33), /* Gamma 1 inclination adjustment */
CMD1(0xca, 0x00), /* Gamma 1 blue offset adjustment */
CMD2(0xec, 0x01, 0xf0), /* Horizontal clock cycles */
CMD_NULL,
};
static int tdo24m_writes(struct tdo24m *lcd, const uint32_t *array)
{
struct spi_transfer *x = &lcd->xfer;
const uint32_t *p = array;
uint32_t data;
int nparams, err = 0;
for (; *p != CMD_NULL; p++) {
if (!lcd->color_invert && *p == CMD0(0x21))
continue;
nparams = (*p >> 30) & 0x3;
data = *p << (7 - nparams);
switch (nparams) {
case 0:
lcd->buf[0] = (data >> 8) & 0xff;
lcd->buf[1] = data & 0xff;
break;
case 1:
lcd->buf[0] = (data >> 16) & 0xff;
lcd->buf[1] = (data >> 8) & 0xff;
lcd->buf[2] = data & 0xff;
break;
case 2:
lcd->buf[0] = (data >> 24) & 0xff;
lcd->buf[1] = (data >> 16) & 0xff;
lcd->buf[2] = (data >> 8) & 0xff;
lcd->buf[3] = data & 0xff;
break;
default:
continue;
}
x->len = nparams + 2;
err = spi_sync(lcd->spi_dev, &lcd->msg);
if (err)
break;
}
return err;
}
static int tdo24m_adj_mode(struct tdo24m *lcd, int mode)
{
switch (mode) {
case MODE_VGA:
tdo24m_writes(lcd, lcd_vga_pass_through_tdo24m);
tdo24m_writes(lcd, lcd_panel_config);
tdo24m_writes(lcd, lcd_vga_transfer_tdo24m);
break;
case MODE_QVGA:
tdo24m_writes(lcd, lcd_qvga_pass_through_tdo24m);
tdo24m_writes(lcd, lcd_panel_config);
tdo24m_writes(lcd, lcd_qvga_transfer);
break;
default:
return -EINVAL;
}
lcd->mode = mode;
return 0;
}
static int tdo35s_adj_mode(struct tdo24m *lcd, int mode)
{
switch (mode) {
case MODE_VGA:
tdo24m_writes(lcd, lcd_vga_pass_through_tdo35s);
tdo24m_writes(lcd, lcd_panel_config);
tdo24m_writes(lcd, lcd_vga_transfer_tdo35s);
break;
case MODE_QVGA:
tdo24m_writes(lcd, lcd_qvga_pass_through_tdo35s);
tdo24m_writes(lcd, lcd_panel_config);
tdo24m_writes(lcd, lcd_qvga_transfer);
break;
default:
return -EINVAL;
}
lcd->mode = mode;
return 0;
}
static int tdo24m_power_on(struct tdo24m *lcd)
{
int err;
err = tdo24m_writes(lcd, lcd_panel_on);
if (err)
goto out;
err = tdo24m_writes(lcd, lcd_panel_reset);
if (err)
goto out;
err = lcd->adj_mode(lcd, lcd->mode);
out:
return err;
}
static int tdo24m_power_off(struct tdo24m *lcd)
{
return tdo24m_writes(lcd, lcd_panel_off);
}
static int tdo24m_power(struct tdo24m *lcd, int power)
{
int ret = 0;
if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power))
ret = tdo24m_power_on(lcd);
else if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->power))
ret = tdo24m_power_off(lcd);
if (!ret)
lcd->power = power;
return ret;
}
static int tdo24m_set_power(struct lcd_device *ld, int power)
{
struct tdo24m *lcd = lcd_get_data(ld);
return tdo24m_power(lcd, power);
}
static int tdo24m_get_power(struct lcd_device *ld)
{
struct tdo24m *lcd = lcd_get_data(ld);
return lcd->power;
}
static int tdo24m_set_mode(struct lcd_device *ld, struct fb_videomode *m)
{
struct tdo24m *lcd = lcd_get_data(ld);
int mode = MODE_QVGA;
if (m->xres == 640 || m->xres == 480)
mode = MODE_VGA;
if (lcd->mode == mode)
return 0;
return lcd->adj_mode(lcd, mode);
}
static struct lcd_ops tdo24m_ops = {
.get_power = tdo24m_get_power,
.set_power = tdo24m_set_power,
.set_mode = tdo24m_set_mode,
};
static int tdo24m_probe(struct spi_device *spi)
{
struct tdo24m *lcd;
struct spi_message *m;
struct spi_transfer *x;
struct tdo24m_platform_data *pdata;
enum tdo24m_model model;
int err;
pdata = dev_get_platdata(&spi->dev);
if (pdata)
model = pdata->model;
else
model = TDO24M;
spi->bits_per_word = 8;
spi->mode = SPI_MODE_3;
err = spi_setup(spi);
if (err)
return err;
lcd = devm_kzalloc(&spi->dev, sizeof(struct tdo24m), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
lcd->spi_dev = spi;
lcd->power = FB_BLANK_POWERDOWN;
lcd->mode = MODE_VGA; /* default to VGA */
lcd->buf = devm_kzalloc(&spi->dev, TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
if (lcd->buf == NULL)
return -ENOMEM;
m = &lcd->msg;
x = &lcd->xfer;
spi_message_init(m);
x->cs_change = 1;
x->tx_buf = &lcd->buf[0];
spi_message_add_tail(x, m);
switch (model) {
case TDO24M:
lcd->color_invert = 1;
lcd->adj_mode = tdo24m_adj_mode;
break;
case TDO35S:
lcd->adj_mode = tdo35s_adj_mode;
lcd->color_invert = 0;
break;
default:
dev_err(&spi->dev, "Unsupported model\n");
return -EINVAL;
}
lcd->lcd_dev = devm_lcd_device_register(&spi->dev, "tdo24m", &spi->dev,
lcd, &tdo24m_ops);
if (IS_ERR(lcd->lcd_dev))
return PTR_ERR(lcd->lcd_dev);
spi_set_drvdata(spi, lcd);
err = tdo24m_power(lcd, FB_BLANK_UNBLANK);
if (err)
return err;
return 0;
}
static int tdo24m_remove(struct spi_device *spi)
{
struct tdo24m *lcd = spi_get_drvdata(spi);
tdo24m_power(lcd, FB_BLANK_POWERDOWN);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int tdo24m_suspend(struct device *dev)
{
struct tdo24m *lcd = dev_get_drvdata(dev);
return tdo24m_power(lcd, FB_BLANK_POWERDOWN);
}
static int tdo24m_resume(struct device *dev)
{
struct tdo24m *lcd = dev_get_drvdata(dev);
return tdo24m_power(lcd, FB_BLANK_UNBLANK);
}
#endif
static SIMPLE_DEV_PM_OPS(tdo24m_pm_ops, tdo24m_suspend, tdo24m_resume);
/* Power down all displays on reboot, poweroff or halt */
static void tdo24m_shutdown(struct spi_device *spi)
{
struct tdo24m *lcd = spi_get_drvdata(spi);
tdo24m_power(lcd, FB_BLANK_POWERDOWN);
}
static struct spi_driver tdo24m_driver = {
.driver = {
.name = "tdo24m",
.owner = THIS_MODULE,
.pm = &tdo24m_pm_ops,
},
.probe = tdo24m_probe,
.remove = tdo24m_remove,
.shutdown = tdo24m_shutdown,
};
module_spi_driver(tdo24m_driver);
MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>");
MODULE_DESCRIPTION("Driver for Toppoly TDO24M LCD Panel");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:tdo24m");
| gpl-2.0 |
andi34/kernel_samsung_espresso | arch/mips/kernel/cpu-probe.c | 2180 | 27774 | /*
* Processor capabilities determination functions.
*
* Copyright (C) xxxx the Anonymous
* Copyright (C) 1994 - 2006 Ralf Baechle
* Copyright (C) 2003, 2004 Maciej W. Rozycki
* Copyright (C) 2001, 2004 MIPS Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/system.h>
#include <asm/watch.h>
#include <asm/spram.h>
#include <asm/uaccess.h>
/*
* Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
* the implementation of the "wait" feature differs between CPU families. This
* points to the function that implements the CPU-specific wait.
* The wait instruction stops the pipeline and considerably reduces the
* power consumption of the CPU.
*/
void (*cpu_wait)(void);
EXPORT_SYMBOL(cpu_wait);
static void r3081_wait(void)
{
unsigned long cfg = read_c0_conf();
write_c0_conf(cfg | R30XX_CONF_HALT);
}
static void r39xx_wait(void)
{
local_irq_disable();
if (!need_resched())
write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
local_irq_enable();
}
extern void r4k_wait(void);
/*
* This variant is preferable as it allows testing need_resched and going to
* sleep depending on the outcome atomically. Unfortunately the "It is
* implementation-dependent whether the pipeline restarts when a non-enabled
* interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
* using this version a gamble.
*/
void r4k_wait_irqoff(void)
{
local_irq_disable();
if (!need_resched())
__asm__(" .set push \n"
" .set mips3 \n"
" wait \n"
" .set pop \n");
local_irq_enable();
__asm__(" .globl __pastwait \n"
"__pastwait: \n");
return;
}
/*
* The RM7000 variant has to handle erratum 38. The workaround is to not
* have any pending stores when the WAIT instruction is executed.
*/
static void rm7k_wait_irqoff(void)
{
local_irq_disable();
if (!need_resched())
__asm__(
" .set push \n"
" .set mips3 \n"
" .set noat \n"
" mfc0 $1, $12 \n"
" sync \n"
" mtc0 $1, $12 # stalls until W stage \n"
" wait \n"
" mtc0 $1, $12 # stalls until W stage \n"
" .set pop \n");
local_irq_enable();
}
/*
* The Au1xxx wait is available only if using 32khz counter or
* external timer source, but specifically not CP0 Counter.
* alchemy/common/time.c may override cpu_wait!
*/
static void au1k_wait(void)
{
__asm__(" .set mips3 \n"
" cache 0x14, 0(%0) \n"
" cache 0x14, 32(%0) \n"
" sync \n"
" nop \n"
" wait \n"
" nop \n"
" nop \n"
" nop \n"
" nop \n"
" .set mips0 \n"
: : "r" (au1k_wait));
}
static int __initdata nowait;
static int __init wait_disable(char *s)
{
nowait = 1;
return 1;
}
__setup("nowait", wait_disable);
static int __cpuinitdata mips_fpu_disabled;
static int __init fpu_disable(char *s)
{
cpu_data[0].options &= ~MIPS_CPU_FPU;
mips_fpu_disabled = 1;
return 1;
}
__setup("nofpu", fpu_disable);
int __cpuinitdata mips_dsp_disabled;
static int __init dsp_disable(char *s)
{
cpu_data[0].ases &= ~MIPS_ASE_DSP;
mips_dsp_disabled = 1;
return 1;
}
__setup("nodsp", dsp_disable);
void __init check_wait(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
if (nowait) {
printk("Wait instruction disabled.\n");
return;
}
switch (c->cputype) {
case CPU_R3081:
case CPU_R3081E:
cpu_wait = r3081_wait;
break;
case CPU_TX3927:
cpu_wait = r39xx_wait;
break;
case CPU_R4200:
/* case CPU_R4300: */
case CPU_R4600:
case CPU_R4640:
case CPU_R4650:
case CPU_R4700:
case CPU_R5000:
case CPU_R5500:
case CPU_NEVADA:
case CPU_4KC:
case CPU_4KEC:
case CPU_4KSC:
case CPU_5KC:
case CPU_25KF:
case CPU_PR4450:
case CPU_BMIPS3300:
case CPU_BMIPS4350:
case CPU_BMIPS4380:
case CPU_BMIPS5000:
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
case CPU_CAVIUM_OCTEON2:
case CPU_JZRISC:
cpu_wait = r4k_wait;
break;
case CPU_RM7000:
cpu_wait = rm7k_wait_irqoff;
break;
case CPU_24K:
case CPU_34K:
case CPU_1004K:
cpu_wait = r4k_wait;
if (read_c0_config7() & MIPS_CONF7_WII)
cpu_wait = r4k_wait_irqoff;
break;
case CPU_74K:
cpu_wait = r4k_wait;
if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
cpu_wait = r4k_wait_irqoff;
break;
case CPU_TX49XX:
cpu_wait = r4k_wait_irqoff;
break;
case CPU_ALCHEMY:
cpu_wait = au1k_wait;
break;
case CPU_20KC:
/*
* WAIT on Rev1.0 has E1, E2, E3 and E16.
* WAIT on Rev2.0 and Rev3.0 has E16.
* Rev3.1 WAIT is a nop, so there is no point in using it.
*/
if ((c->processor_id & 0xff) <= 0x64)
break;
/*
* Another rev is incrementing c0_count at a reduced clock
* rate while in WAIT mode. So we basically have the choice
* between using the cp0 timer as clocksource or avoiding
* the WAIT instruction. Until more details are known,
* disable the use of WAIT for 20Kc entirely.
cpu_wait = r4k_wait;
*/
break;
case CPU_RM9000:
if ((c->processor_id & 0x00ff) >= 0x40)
cpu_wait = r4k_wait;
break;
default:
break;
}
}
static inline void check_errata(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
switch (c->cputype) {
case CPU_34K:
/*
* Erratum "RPS May Cause Incorrect Instruction Execution"
* This code only handles VPE0; any SMP/SMTC/RTOS code
* making use of VPE1 will be responsible for that VPE.
*/
if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2)
write_c0_config7(read_c0_config7() | MIPS_CONF7_RPS);
break;
default:
break;
}
}
void __init check_bugs32(void)
{
check_errata();
}
/*
* Probe whether cpu has config register by trying to play with
* alternate cache bit and see whether it matters.
* It's used by cpu_probe to distinguish between R3000A and R3081.
*/
static inline int cpu_has_confreg(void)
{
#ifdef CONFIG_CPU_R3000
extern unsigned long r3k_cache_size(unsigned long);
unsigned long size1, size2;
unsigned long cfg = read_c0_conf();
size1 = r3k_cache_size(ST0_ISC);
write_c0_conf(cfg ^ R30XX_CONF_AC);
size2 = r3k_cache_size(ST0_ISC);
write_c0_conf(cfg);
return size1 != size2;
#else
return 0;
#endif
}
static inline void set_elf_platform(int cpu, const char *plat)
{
if (cpu == 0)
__elf_platform = plat;
}
/*
* Get the FPU Implementation/Revision.
*/
static inline unsigned long cpu_get_fpu_id(void)
{
unsigned long tmp, fpu_id;
tmp = read_c0_status();
__enable_fpu();
fpu_id = read_32bit_cp1_register(CP1_REVISION);
write_c0_status(tmp);
return fpu_id;
}
/*
* Check the CPU has an FPU the official way.
*/
static inline int __cpu_has_fpu(void)
{
return ((cpu_get_fpu_id() & 0xff00) != FPIR_IMP_NONE);
}
static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
{
#ifdef __NEED_VMBITS_PROBE
write_c0_entryhi(0x3fffffffffffe000ULL);
back_to_back_c0_hazard();
c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
#endif
}
#define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \
| MIPS_CPU_COUNTER)
static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
{
switch (c->processor_id & 0xff00) {
case PRID_IMP_R2000:
c->cputype = CPU_R2000;
__cpu_name[cpu] = "R2000";
c->isa_level = MIPS_CPU_ISA_I;
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
c->options |= MIPS_CPU_FPU;
c->tlbsize = 64;
break;
case PRID_IMP_R3000:
if ((c->processor_id & 0xff) == PRID_REV_R3000A) {
if (cpu_has_confreg()) {
c->cputype = CPU_R3081E;
__cpu_name[cpu] = "R3081";
} else {
c->cputype = CPU_R3000A;
__cpu_name[cpu] = "R3000A";
}
break;
} else {
c->cputype = CPU_R3000;
__cpu_name[cpu] = "R3000";
}
c->isa_level = MIPS_CPU_ISA_I;
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
c->options |= MIPS_CPU_FPU;
c->tlbsize = 64;
break;
case PRID_IMP_R4000:
if (read_c0_config() & CONF_SC) {
if ((c->processor_id & 0xff) >= PRID_REV_R4400) {
c->cputype = CPU_R4400PC;
__cpu_name[cpu] = "R4400PC";
} else {
c->cputype = CPU_R4000PC;
__cpu_name[cpu] = "R4000PC";
}
} else {
if ((c->processor_id & 0xff) >= PRID_REV_R4400) {
c->cputype = CPU_R4400SC;
__cpu_name[cpu] = "R4400SC";
} else {
c->cputype = CPU_R4000SC;
__cpu_name[cpu] = "R4000SC";
}
}
c->isa_level = MIPS_CPU_ISA_III;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_WATCH | MIPS_CPU_VCE |
MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_VR41XX:
switch (c->processor_id & 0xf0) {
case PRID_REV_VR4111:
c->cputype = CPU_VR4111;
__cpu_name[cpu] = "NEC VR4111";
break;
case PRID_REV_VR4121:
c->cputype = CPU_VR4121;
__cpu_name[cpu] = "NEC VR4121";
break;
case PRID_REV_VR4122:
if ((c->processor_id & 0xf) < 0x3) {
c->cputype = CPU_VR4122;
__cpu_name[cpu] = "NEC VR4122";
} else {
c->cputype = CPU_VR4181A;
__cpu_name[cpu] = "NEC VR4181A";
}
break;
case PRID_REV_VR4130:
if ((c->processor_id & 0xf) < 0x4) {
c->cputype = CPU_VR4131;
__cpu_name[cpu] = "NEC VR4131";
} else {
c->cputype = CPU_VR4133;
__cpu_name[cpu] = "NEC VR4133";
}
break;
default:
printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n");
c->cputype = CPU_VR41XX;
__cpu_name[cpu] = "NEC Vr41xx";
break;
}
c->isa_level = MIPS_CPU_ISA_III;
c->options = R4K_OPTS;
c->tlbsize = 32;
break;
case PRID_IMP_R4300:
c->cputype = CPU_R4300;
__cpu_name[cpu] = "R4300";
c->isa_level = MIPS_CPU_ISA_III;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 32;
break;
case PRID_IMP_R4600:
c->cputype = CPU_R4600;
__cpu_name[cpu] = "R4600";
c->isa_level = MIPS_CPU_ISA_III;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
#if 0
case PRID_IMP_R4650:
/*
* This processor doesn't have an MMU, so it's not
* "real easy" to run Linux on it. It is left purely
* for documentation. Commented out because it shares
* its c0_prid id number with the TX3900.
*/
c->cputype = CPU_R4650;
__cpu_name[cpu] = "R4650";
c->isa_level = MIPS_CPU_ISA_III;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
#endif
case PRID_IMP_TX39:
c->isa_level = MIPS_CPU_ISA_I;
c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE;
if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) {
c->cputype = CPU_TX3927;
__cpu_name[cpu] = "TX3927";
c->tlbsize = 64;
} else {
switch (c->processor_id & 0xff) {
case PRID_REV_TX3912:
c->cputype = CPU_TX3912;
__cpu_name[cpu] = "TX3912";
c->tlbsize = 32;
break;
case PRID_REV_TX3922:
c->cputype = CPU_TX3922;
__cpu_name[cpu] = "TX3922";
c->tlbsize = 64;
break;
}
}
break;
case PRID_IMP_R4700:
c->cputype = CPU_R4700;
__cpu_name[cpu] = "R4700";
c->isa_level = MIPS_CPU_ISA_III;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_TX49:
c->cputype = CPU_TX49XX;
__cpu_name[cpu] = "R49XX";
c->isa_level = MIPS_CPU_ISA_III;
c->options = R4K_OPTS | MIPS_CPU_LLSC;
if (!(c->processor_id & 0x08))
c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR;
c->tlbsize = 48;
break;
case PRID_IMP_R5000:
c->cputype = CPU_R5000;
__cpu_name[cpu] = "R5000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_R5432:
c->cputype = CPU_R5432;
__cpu_name[cpu] = "R5432";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_WATCH | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_R5500:
c->cputype = CPU_R5500;
__cpu_name[cpu] = "R5500";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_WATCH | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_NEVADA:
c->cputype = CPU_NEVADA;
__cpu_name[cpu] = "Nevada";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_R6000:
c->cputype = CPU_R6000;
__cpu_name[cpu] = "R6000";
c->isa_level = MIPS_CPU_ISA_II;
c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
MIPS_CPU_LLSC;
c->tlbsize = 32;
break;
case PRID_IMP_R6000A:
c->cputype = CPU_R6000A;
__cpu_name[cpu] = "R6000A";
c->isa_level = MIPS_CPU_ISA_II;
c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
MIPS_CPU_LLSC;
c->tlbsize = 32;
break;
case PRID_IMP_RM7000:
c->cputype = CPU_RM7000;
__cpu_name[cpu] = "RM7000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
/*
* Undocumented RM7000: Bit 29 in the info register of
* the RM7000 v2.0 indicates if the TLB has 48 or 64
* entries.
*
* 29 1 => 64 entry JTLB
* 0 => 48 entry JTLB
*/
c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
break;
case PRID_IMP_RM9000:
c->cputype = CPU_RM9000;
__cpu_name[cpu] = "RM9000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
/*
* Bit 29 in the info register of the RM9000
* indicates if the TLB has 48 or 64 entries.
*
* 29 1 => 64 entry JTLB
* 0 => 48 entry JTLB
*/
c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
break;
case PRID_IMP_R8000:
c->cputype = CPU_R8000;
__cpu_name[cpu] = "RM8000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 384; /* has weird TLB: 3-way x 128 */
break;
case PRID_IMP_R10000:
c->cputype = CPU_R10000;
__cpu_name[cpu] = "R10000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
case PRID_IMP_R12000:
c->cputype = CPU_R12000;
__cpu_name[cpu] = "R12000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
case PRID_IMP_R14000:
c->cputype = CPU_R14000;
__cpu_name[cpu] = "R14000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
case PRID_IMP_LOONGSON2:
c->cputype = CPU_LOONGSON2;
__cpu_name[cpu] = "ICT Loongson-2";
switch (c->processor_id & PRID_REV_MASK) {
case PRID_REV_LOONGSON2E:
set_elf_platform(cpu, "loongson2e");
break;
case PRID_REV_LOONGSON2F:
set_elf_platform(cpu, "loongson2f");
break;
}
c->isa_level = MIPS_CPU_ISA_III;
c->options = R4K_OPTS |
MIPS_CPU_FPU | MIPS_CPU_LLSC |
MIPS_CPU_32FPR;
c->tlbsize = 64;
break;
}
}
static char unknown_isa[] __cpuinitdata = KERN_ERR \
"Unsupported ISA type, c0.config0: %d.";
static inline unsigned int decode_config0(struct cpuinfo_mips *c)
{
unsigned int config0;
int isa;
config0 = read_c0_config();
if (((config0 & MIPS_CONF_MT) >> 7) == 1)
c->options |= MIPS_CPU_TLB;
isa = (config0 & MIPS_CONF_AT) >> 13;
switch (isa) {
case 0:
switch ((config0 & MIPS_CONF_AR) >> 10) {
case 0:
c->isa_level = MIPS_CPU_ISA_M32R1;
break;
case 1:
c->isa_level = MIPS_CPU_ISA_M32R2;
break;
default:
goto unknown;
}
break;
case 2:
switch ((config0 & MIPS_CONF_AR) >> 10) {
case 0:
c->isa_level = MIPS_CPU_ISA_M64R1;
break;
case 1:
c->isa_level = MIPS_CPU_ISA_M64R2;
break;
default:
goto unknown;
}
break;
default:
goto unknown;
}
return config0 & MIPS_CONF_M;
unknown:
panic(unknown_isa, config0);
}
static inline unsigned int decode_config1(struct cpuinfo_mips *c)
{
unsigned int config1;
config1 = read_c0_config1();
if (config1 & MIPS_CONF1_MD)
c->ases |= MIPS_ASE_MDMX;
if (config1 & MIPS_CONF1_WR)
c->options |= MIPS_CPU_WATCH;
if (config1 & MIPS_CONF1_CA)
c->ases |= MIPS_ASE_MIPS16;
if (config1 & MIPS_CONF1_EP)
c->options |= MIPS_CPU_EJTAG;
if (config1 & MIPS_CONF1_FP) {
c->options |= MIPS_CPU_FPU;
c->options |= MIPS_CPU_32FPR;
}
if (cpu_has_tlb)
c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1;
return config1 & MIPS_CONF_M;
}
static inline unsigned int decode_config2(struct cpuinfo_mips *c)
{
unsigned int config2;
config2 = read_c0_config2();
if (config2 & MIPS_CONF2_SL)
c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
return config2 & MIPS_CONF_M;
}
static inline unsigned int decode_config3(struct cpuinfo_mips *c)
{
unsigned int config3;
config3 = read_c0_config3();
if (config3 & MIPS_CONF3_SM)
c->ases |= MIPS_ASE_SMARTMIPS;
if (config3 & MIPS_CONF3_DSP)
c->ases |= MIPS_ASE_DSP;
if (config3 & MIPS_CONF3_VINT)
c->options |= MIPS_CPU_VINT;
if (config3 & MIPS_CONF3_VEIC)
c->options |= MIPS_CPU_VEIC;
if (config3 & MIPS_CONF3_MT)
c->ases |= MIPS_ASE_MIPSMT;
if (config3 & MIPS_CONF3_ULRI)
c->options |= MIPS_CPU_ULRI;
return config3 & MIPS_CONF_M;
}
static inline unsigned int decode_config4(struct cpuinfo_mips *c)
{
unsigned int config4;
config4 = read_c0_config4();
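/* With MMUExtDef == 1, Config4[7:0] (MMUSizeExt) supplies the high-order bits of the MMU size, so each unit below adds 0x40 entries on top of the Config1-derived tlbsize. */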
if ((config4 & MIPS_CONF4_MMUEXTDEF) == MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT
&& cpu_has_tlb)
c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
c->kscratch_mask = (config4 >> 16) & 0xff;
return config4 & MIPS_CONF_M;
}
static void __cpuinit decode_configs(struct cpuinfo_mips *c)
{
int ok;
/* MIPS32 or MIPS64 compliant CPU. */
c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
c->scache.flags = MIPS_CACHE_NOT_PRESENT;
ok = decode_config0(c); /* Read Config registers. */
BUG_ON(!ok); /* Arch spec violation! */
if (ok)
ok = decode_config1(c);
if (ok)
ok = decode_config2(c);
if (ok)
ok = decode_config3(c);
if (ok)
ok = decode_config4(c);
mips_probe_watch_registers(c);
if (cpu_has_mips_r2)
c->core = read_c0_ebase() & 0x3ff;
}
static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
switch (c->processor_id & 0xff00) {
case PRID_IMP_4KC:
c->cputype = CPU_4KC;
__cpu_name[cpu] = "MIPS 4Kc";
break;
case PRID_IMP_4KEC:
case PRID_IMP_4KECR2:
c->cputype = CPU_4KEC;
__cpu_name[cpu] = "MIPS 4KEc";
break;
case PRID_IMP_4KSC:
case PRID_IMP_4KSD:
c->cputype = CPU_4KSC;
__cpu_name[cpu] = "MIPS 4KSc";
break;
case PRID_IMP_5KC:
c->cputype = CPU_5KC;
__cpu_name[cpu] = "MIPS 5Kc";
break;
case PRID_IMP_20KC:
c->cputype = CPU_20KC;
__cpu_name[cpu] = "MIPS 20Kc";
break;
case PRID_IMP_24K:
case PRID_IMP_24KE:
c->cputype = CPU_24K;
__cpu_name[cpu] = "MIPS 24Kc";
break;
case PRID_IMP_25KF:
c->cputype = CPU_25KF;
__cpu_name[cpu] = "MIPS 25Kc";
break;
case PRID_IMP_34K:
c->cputype = CPU_34K;
__cpu_name[cpu] = "MIPS 34Kc";
break;
case PRID_IMP_74K:
c->cputype = CPU_74K;
__cpu_name[cpu] = "MIPS 74Kc";
break;
case PRID_IMP_1004K:
c->cputype = CPU_1004K;
__cpu_name[cpu] = "MIPS 1004Kc";
break;
}
spram_config();
}
static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
switch (c->processor_id & 0xff00) {
case PRID_IMP_AU1_REV1:
case PRID_IMP_AU1_REV2:
c->cputype = CPU_ALCHEMY;
switch ((c->processor_id >> 24) & 0xff) {
case 0:
__cpu_name[cpu] = "Au1000";
break;
case 1:
__cpu_name[cpu] = "Au1500";
break;
case 2:
__cpu_name[cpu] = "Au1100";
break;
case 3:
__cpu_name[cpu] = "Au1550";
break;
case 4:
__cpu_name[cpu] = "Au1200";
if ((c->processor_id & 0xff) == 2)
__cpu_name[cpu] = "Au1250";
break;
case 5:
__cpu_name[cpu] = "Au1210";
break;
default:
__cpu_name[cpu] = "Au1xxx";
break;
}
break;
}
}
static inline void cpu_probe_sibyte(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
switch (c->processor_id & 0xff00) {
case PRID_IMP_SB1:
c->cputype = CPU_SB1;
__cpu_name[cpu] = "SiByte SB1";
/* FPU in pass1 is known to have issues. */
if ((c->processor_id & 0xff) < 0x02)
c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR);
break;
case PRID_IMP_SB1A:
c->cputype = CPU_SB1A;
__cpu_name[cpu] = "SiByte SB1A";
break;
}
}
static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
switch (c->processor_id & 0xff00) {
case PRID_IMP_SR71000:
c->cputype = CPU_SR71000;
__cpu_name[cpu] = "Sandcraft SR71000";
c->scache.ways = 8;
c->tlbsize = 64;
break;
}
}
static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
switch (c->processor_id & 0xff00) {
case PRID_IMP_PR4450:
c->cputype = CPU_PR4450;
__cpu_name[cpu] = "Philips PR4450";
c->isa_level = MIPS_CPU_ISA_M32R1;
break;
}
}
static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
switch (c->processor_id & 0xff00) {
case PRID_IMP_BMIPS32_REV4:
case PRID_IMP_BMIPS32_REV8:
c->cputype = CPU_BMIPS32;
__cpu_name[cpu] = "Broadcom BMIPS32";
set_elf_platform(cpu, "bmips32");
break;
case PRID_IMP_BMIPS3300:
case PRID_IMP_BMIPS3300_ALT:
case PRID_IMP_BMIPS3300_BUG:
c->cputype = CPU_BMIPS3300;
__cpu_name[cpu] = "Broadcom BMIPS3300";
set_elf_platform(cpu, "bmips3300");
break;
case PRID_IMP_BMIPS43XX: {
int rev = c->processor_id & 0xff;
if (rev >= PRID_REV_BMIPS4380_LO &&
rev <= PRID_REV_BMIPS4380_HI) {
c->cputype = CPU_BMIPS4380;
__cpu_name[cpu] = "Broadcom BMIPS4380";
set_elf_platform(cpu, "bmips4380");
} else {
c->cputype = CPU_BMIPS4350;
__cpu_name[cpu] = "Broadcom BMIPS4350";
set_elf_platform(cpu, "bmips4350");
}
break;
}
case PRID_IMP_BMIPS5000:
c->cputype = CPU_BMIPS5000;
__cpu_name[cpu] = "Broadcom BMIPS5000";
set_elf_platform(cpu, "bmips5000");
c->options |= MIPS_CPU_ULRI;
break;
}
}
static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
switch (c->processor_id & 0xff00) {
case PRID_IMP_CAVIUM_CN38XX:
case PRID_IMP_CAVIUM_CN31XX:
case PRID_IMP_CAVIUM_CN30XX:
c->cputype = CPU_CAVIUM_OCTEON;
__cpu_name[cpu] = "Cavium Octeon";
goto platform;
case PRID_IMP_CAVIUM_CN58XX:
case PRID_IMP_CAVIUM_CN56XX:
case PRID_IMP_CAVIUM_CN50XX:
case PRID_IMP_CAVIUM_CN52XX:
c->cputype = CPU_CAVIUM_OCTEON_PLUS;
__cpu_name[cpu] = "Cavium Octeon+";
platform:
set_elf_platform(cpu, "octeon");
break;
case PRID_IMP_CAVIUM_CN63XX:
c->cputype = CPU_CAVIUM_OCTEON2;
__cpu_name[cpu] = "Cavium Octeon II";
set_elf_platform(cpu, "octeon2");
break;
default:
printk(KERN_INFO "Unknown Octeon chip!\n");
c->cputype = CPU_UNKNOWN;
break;
}
}
static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
/* JZRISC does not implement the CP0 counter. */
c->options &= ~MIPS_CPU_COUNTER;
switch (c->processor_id & 0xff00) {
case PRID_IMP_JZRISC:
c->cputype = CPU_JZRISC;
__cpu_name[cpu] = "Ingenic JZRISC";
break;
default:
panic("Unknown Ingenic Processor ID!");
break;
}
}
static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
{
decode_configs(c);
c->options = (MIPS_CPU_TLB |
MIPS_CPU_4KEX |
MIPS_CPU_COUNTER |
MIPS_CPU_DIVEC |
MIPS_CPU_WATCH |
MIPS_CPU_EJTAG |
MIPS_CPU_LLSC);
switch (c->processor_id & 0xff00) {
case PRID_IMP_NETLOGIC_XLR732:
case PRID_IMP_NETLOGIC_XLR716:
case PRID_IMP_NETLOGIC_XLR532:
case PRID_IMP_NETLOGIC_XLR308:
case PRID_IMP_NETLOGIC_XLR532C:
case PRID_IMP_NETLOGIC_XLR516C:
case PRID_IMP_NETLOGIC_XLR508C:
case PRID_IMP_NETLOGIC_XLR308C:
c->cputype = CPU_XLR;
__cpu_name[cpu] = "Netlogic XLR";
break;
case PRID_IMP_NETLOGIC_XLS608:
case PRID_IMP_NETLOGIC_XLS408:
case PRID_IMP_NETLOGIC_XLS404:
case PRID_IMP_NETLOGIC_XLS208:
case PRID_IMP_NETLOGIC_XLS204:
case PRID_IMP_NETLOGIC_XLS108:
case PRID_IMP_NETLOGIC_XLS104:
case PRID_IMP_NETLOGIC_XLS616B:
case PRID_IMP_NETLOGIC_XLS608B:
case PRID_IMP_NETLOGIC_XLS416B:
case PRID_IMP_NETLOGIC_XLS412B:
case PRID_IMP_NETLOGIC_XLS408B:
case PRID_IMP_NETLOGIC_XLS404B:
c->cputype = CPU_XLR;
__cpu_name[cpu] = "Netlogic XLS";
break;
default:
printk(KERN_INFO "Unknown Netlogic chip id [%02x]!\n",
c->processor_id);
c->cputype = CPU_XLR;
break;
}
c->isa_level = MIPS_CPU_ISA_M64R1;
c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1;
}
#ifdef CONFIG_64BIT
/* For use by uaccess.h */
u64 __ua_limit;
EXPORT_SYMBOL(__ua_limit);
#endif
const char *__cpu_name[NR_CPUS];
const char *__elf_platform;
__cpuinit void cpu_probe(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int cpu = smp_processor_id();
c->processor_id = PRID_IMP_UNKNOWN;
c->fpu_id = FPIR_IMP_NONE;
c->cputype = CPU_UNKNOWN;
c->processor_id = read_c0_prid();
switch (c->processor_id & 0xff0000) {
case PRID_COMP_LEGACY:
cpu_probe_legacy(c, cpu);
break;
case PRID_COMP_MIPS:
cpu_probe_mips(c, cpu);
break;
case PRID_COMP_ALCHEMY:
cpu_probe_alchemy(c, cpu);
break;
case PRID_COMP_SIBYTE:
cpu_probe_sibyte(c, cpu);
break;
case PRID_COMP_BROADCOM:
cpu_probe_broadcom(c, cpu);
break;
case PRID_COMP_SANDCRAFT:
cpu_probe_sandcraft(c, cpu);
break;
case PRID_COMP_NXP:
cpu_probe_nxp(c, cpu);
break;
case PRID_COMP_CAVIUM:
cpu_probe_cavium(c, cpu);
break;
case PRID_COMP_INGENIC:
cpu_probe_ingenic(c, cpu);
break;
case PRID_COMP_NETLOGIC:
cpu_probe_netlogic(c, cpu);
break;
}
BUG_ON(!__cpu_name[cpu]);
BUG_ON(c->cputype == CPU_UNKNOWN);
/*
* Platform code can force the cpu type to optimize code
* generation. In that case be sure the cpu type is correctly
* manually setup otherwise it could trigger some nasty bugs.
*/
BUG_ON(current_cpu_type() != c->cputype);
if (mips_fpu_disabled)
c->options &= ~MIPS_CPU_FPU;
if (mips_dsp_disabled)
c->ases &= ~MIPS_ASE_DSP;
if (c->options & MIPS_CPU_FPU) {
c->fpu_id = cpu_get_fpu_id();
if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
c->isa_level == MIPS_CPU_ISA_M32R2 ||
c->isa_level == MIPS_CPU_ISA_M64R1 ||
c->isa_level == MIPS_CPU_ISA_M64R2) {
if (c->fpu_id & MIPS_FPIR_3D)
c->ases |= MIPS_ASE_MIPS3D;
}
}
if (cpu_has_mips_r2)
c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
else
c->srsets = 1;
cpu_probe_vmbits(c);
#ifdef CONFIG_64BIT
if (cpu == 0)
__ua_limit = ~((1ull << cpu_vmbits) - 1);
#endif
}
__cpuinit void cpu_report(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
printk(KERN_INFO "CPU revision is: %08x (%s)\n",
c->processor_id, cpu_name_string());
if (c->options & MIPS_CPU_FPU)
printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id);
}
| gpl-2.0 |
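Every probe helper in the file above slices the same PRId register layout: company in bits 23:16 (PRID_COMP_*), implementation in bits 15:8 (PRID_IMP_*), revision in bits 7:0 (PRID_REV_*). A minimal userspace sketch of that masking — the helper name and sample value are illustrative, not taken from the kernel:
#include <stdio.h>
#include <stdint.h>
static void decode_prid(uint32_t prid)
{
	uint32_t company = (prid & 0xff0000) >> 16;	/* PRID_COMP_* space */
	uint32_t imp = (prid & 0xff00) >> 8;		/* PRID_IMP_* space */
	uint32_t rev = prid & 0xff;			/* PRID_REV_* space */
	printf("company=%02x imp=%02x rev=%02x\n",
	       (unsigned)company, (unsigned)imp, (unsigned)rev);
}
int main(void)
{
	decode_prid(0x00018000);	/* made-up PRId value */
	return 0;
}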
nazunamoe/jolla-kernel_bullhead | drivers/mfd/da9052-irq.c | 3460 | 6138 | /*
* DA9052 interrupt support
*
* Author: Fabio Estevam <fabio.estevam@freescale.com>
* Based on arizona-irq.c, which is:
*
* Copyright 2012 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
#define DA9052_NUM_IRQ_REGS 4
#define DA9052_IRQ_MASK_POS_1 0x01
#define DA9052_IRQ_MASK_POS_2 0x02
#define DA9052_IRQ_MASK_POS_3 0x04
#define DA9052_IRQ_MASK_POS_4 0x08
#define DA9052_IRQ_MASK_POS_5 0x10
#define DA9052_IRQ_MASK_POS_6 0x20
#define DA9052_IRQ_MASK_POS_7 0x40
#define DA9052_IRQ_MASK_POS_8 0x80
static struct regmap_irq da9052_irqs[] = {
[DA9052_IRQ_DCIN] = {
.reg_offset = 0,
.mask = DA9052_IRQ_MASK_POS_1,
},
[DA9052_IRQ_VBUS] = {
.reg_offset = 0,
.mask = DA9052_IRQ_MASK_POS_2,
},
[DA9052_IRQ_DCINREM] = {
.reg_offset = 0,
.mask = DA9052_IRQ_MASK_POS_3,
},
[DA9052_IRQ_VBUSREM] = {
.reg_offset = 0,
.mask = DA9052_IRQ_MASK_POS_4,
},
[DA9052_IRQ_VDDLOW] = {
.reg_offset = 0,
.mask = DA9052_IRQ_MASK_POS_5,
},
[DA9052_IRQ_ALARM] = {
.reg_offset = 0,
.mask = DA9052_IRQ_MASK_POS_6,
},
[DA9052_IRQ_SEQRDY] = {
.reg_offset = 0,
.mask = DA9052_IRQ_MASK_POS_7,
},
[DA9052_IRQ_COMP1V2] = {
.reg_offset = 0,
.mask = DA9052_IRQ_MASK_POS_8,
},
[DA9052_IRQ_NONKEY] = {
.reg_offset = 1,
.mask = DA9052_IRQ_MASK_POS_1,
},
[DA9052_IRQ_IDFLOAT] = {
.reg_offset = 1,
.mask = DA9052_IRQ_MASK_POS_2,
},
[DA9052_IRQ_IDGND] = {
.reg_offset = 1,
.mask = DA9052_IRQ_MASK_POS_3,
},
[DA9052_IRQ_CHGEND] = {
.reg_offset = 1,
.mask = DA9052_IRQ_MASK_POS_4,
},
[DA9052_IRQ_TBAT] = {
.reg_offset = 1,
.mask = DA9052_IRQ_MASK_POS_5,
},
[DA9052_IRQ_ADC_EOM] = {
.reg_offset = 1,
.mask = DA9052_IRQ_MASK_POS_6,
},
[DA9052_IRQ_PENDOWN] = {
.reg_offset = 1,
.mask = DA9052_IRQ_MASK_POS_7,
},
[DA9052_IRQ_TSIREADY] = {
.reg_offset = 1,
.mask = DA9052_IRQ_MASK_POS_8,
},
[DA9052_IRQ_GPI0] = {
.reg_offset = 2,
.mask = DA9052_IRQ_MASK_POS_1,
},
[DA9052_IRQ_GPI1] = {
.reg_offset = 2,
.mask = DA9052_IRQ_MASK_POS_2,
},
[DA9052_IRQ_GPI2] = {
.reg_offset = 2,
.mask = DA9052_IRQ_MASK_POS_3,
},
[DA9052_IRQ_GPI3] = {
.reg_offset = 2,
.mask = DA9052_IRQ_MASK_POS_4,
},
[DA9052_IRQ_GPI4] = {
.reg_offset = 2,
.mask = DA9052_IRQ_MASK_POS_5,
},
[DA9052_IRQ_GPI5] = {
.reg_offset = 2,
.mask = DA9052_IRQ_MASK_POS_6,
},
[DA9052_IRQ_GPI6] = {
.reg_offset = 2,
.mask = DA9052_IRQ_MASK_POS_7,
},
[DA9052_IRQ_GPI7] = {
.reg_offset = 2,
.mask = DA9052_IRQ_MASK_POS_8,
},
[DA9052_IRQ_GPI8] = {
.reg_offset = 3,
.mask = DA9052_IRQ_MASK_POS_1,
},
[DA9052_IRQ_GPI9] = {
.reg_offset = 3,
.mask = DA9052_IRQ_MASK_POS_2,
},
[DA9052_IRQ_GPI10] = {
.reg_offset = 3,
.mask = DA9052_IRQ_MASK_POS_3,
},
[DA9052_IRQ_GPI11] = {
.reg_offset = 3,
.mask = DA9052_IRQ_MASK_POS_4,
},
[DA9052_IRQ_GPI12] = {
.reg_offset = 3,
.mask = DA9052_IRQ_MASK_POS_5,
},
[DA9052_IRQ_GPI13] = {
.reg_offset = 3,
.mask = DA9052_IRQ_MASK_POS_6,
},
[DA9052_IRQ_GPI14] = {
.reg_offset = 3,
.mask = DA9052_IRQ_MASK_POS_7,
},
[DA9052_IRQ_GPI15] = {
.reg_offset = 3,
.mask = DA9052_IRQ_MASK_POS_8,
},
};
static struct regmap_irq_chip da9052_regmap_irq_chip = {
.name = "da9052_irq",
.status_base = DA9052_EVENT_A_REG,
.mask_base = DA9052_IRQ_MASK_A_REG,
.ack_base = DA9052_EVENT_A_REG,
.num_regs = DA9052_NUM_IRQ_REGS,
.irqs = da9052_irqs,
.num_irqs = ARRAY_SIZE(da9052_irqs),
};
static int da9052_map_irq(struct da9052 *da9052, int irq)
{
return regmap_irq_get_virq(da9052->irq_data, irq);
}
int da9052_enable_irq(struct da9052 *da9052, int irq)
{
irq = da9052_map_irq(da9052, irq);
if (irq < 0)
return irq;
enable_irq(irq);
return 0;
}
EXPORT_SYMBOL_GPL(da9052_enable_irq);
int da9052_disable_irq(struct da9052 *da9052, int irq)
{
irq = da9052_map_irq(da9052, irq);
if (irq < 0)
return irq;
disable_irq(irq);
return 0;
}
EXPORT_SYMBOL_GPL(da9052_disable_irq);
int da9052_disable_irq_nosync(struct da9052 *da9052, int irq)
{
irq = da9052_map_irq(da9052, irq);
if (irq < 0)
return irq;
disable_irq_nosync(irq);
return 0;
}
EXPORT_SYMBOL_GPL(da9052_disable_irq_nosync);
int da9052_request_irq(struct da9052 *da9052, int irq, char *name,
irq_handler_t handler, void *data)
{
irq = da9052_map_irq(da9052, irq);
if (irq < 0)
return irq;
return request_threaded_irq(irq, NULL, handler,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
name, data);
}
EXPORT_SYMBOL_GPL(da9052_request_irq);
void da9052_free_irq(struct da9052 *da9052, int irq, void *data)
{
irq = da9052_map_irq(da9052, irq);
if (irq < 0)
return;
free_irq(irq, data);
}
EXPORT_SYMBOL_GPL(da9052_free_irq);
static irqreturn_t da9052_auxadc_irq(int irq, void *irq_data)
{
struct da9052 *da9052 = irq_data;
complete(&da9052->done);
return IRQ_HANDLED;
}
int da9052_irq_init(struct da9052 *da9052)
{
int ret;
ret = regmap_add_irq_chip(da9052->regmap, da9052->chip_irq,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-1, &da9052_regmap_irq_chip,
&da9052->irq_data);
if (ret < 0) {
dev_err(da9052->dev, "regmap_add_irq_chip failed: %d\n", ret);
goto regmap_err;
}
ret = da9052_request_irq(da9052, DA9052_IRQ_ADC_EOM, "adc-irq",
da9052_auxadc_irq, da9052);
if (ret != 0) {
dev_err(da9052->dev, "DA9052_IRQ_ADC_EOM failed: %d\n", ret);
goto request_irq_err;
}
return 0;
request_irq_err:
regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
regmap_err:
return ret;
}
int da9052_irq_exit(struct da9052 *da9052)
{
da9052_free_irq(da9052, DA9052_IRQ_ADC_EOM, da9052);
regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
return 0;
}
| gpl-2.0 |
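The eight DA9052_IRQ_MASK_POS_* constants in the driver above are single-bit masks, so each regmap_irq entry addresses exactly one bit of one of the four event registers via (.reg_offset, .mask). A standalone self-check of that encoding (not driver code):
#include <assert.h>
static unsigned int mask_pos(int n)	/* n = 1..8, as in DA9052_IRQ_MASK_POS_n */
{
	return 1u << (n - 1);
}
int main(void)
{
	assert(mask_pos(1) == 0x01);
	assert(mask_pos(5) == 0x10);
	assert(mask_pos(8) == 0x80);
	return 0;
}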
richardtrip/endeavoru | drivers/isdn/act2000/module.c | 4228 | 21473 | /* $Id: module.c,v 1.14.6.4 2001/09/23 22:24:32 kai Exp $
*
* ISDN lowlevel-module for the IBM ISDN-S0 Active 2000.
*
* Author Fritz Elfert
* Copyright by Fritz Elfert <fritz@isdn4linux.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* Thanks to Friedemann Baitinger and IBM Germany
*
*/
#include "act2000.h"
#include "act2000_isa.h"
#include "capi.h"
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
static unsigned short act2000_isa_ports[] =
{
0x0200, 0x0240, 0x0280, 0x02c0, 0x0300, 0x0340, 0x0380,
0xcfe0, 0xcfa0, 0xcf60, 0xcf20, 0xcee0, 0xcea0, 0xce60,
};
static act2000_card *cards = (act2000_card *) NULL;
/* Parameters to be set by insmod */
static int act_bus = 0;
static int act_port = -1; /* -1 = Autoprobe */
static int act_irq = -1;
static char *act_id = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
MODULE_DESCRIPTION( "ISDN4Linux: Driver for IBM Active 2000 ISDN card");
MODULE_AUTHOR( "Fritz Elfert");
MODULE_LICENSE( "GPL");
MODULE_PARM_DESC(act_bus, "BusType of first card, 1=ISA, 2=MCA, 3=PCMCIA, currently only ISA");
MODULE_PARM_DESC(act_port, "Base port address of first card");
MODULE_PARM_DESC(act_irq, "IRQ of first card");
MODULE_PARM_DESC(act_id, "ID-String of first card");
module_param(act_bus, int, 0);
module_param(act_port, int, 0);
module_param(act_irq, int, 0);
module_param(act_id, charp, 0);
static int act2000_addcard(int, int, int, char *);
static act2000_chan *
find_channel(act2000_card *card, int channel)
{
if ((channel >= 0) && (channel < ACT2000_BCH))
return &(card->bch[channel]);
printk(KERN_WARNING "act2000: Invalid channel %d\n", channel);
return NULL;
}
/*
* Free MSN list
*/
static void
act2000_clear_msn(act2000_card *card)
{
struct msn_entry *p = card->msn_list;
struct msn_entry *q;
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
card->msn_list = NULL;
spin_unlock_irqrestore(&card->lock, flags);
while (p) {
q = p->next;
kfree(p);
p = q;
}
}
/*
* Find an MSN entry in the list.
* If ia5 != 0, return IA5-encoded EAZ, else
* return a bitmask with corresponding bit set.
*/
static __u16
act2000_find_msn(act2000_card *card, char *msn, int ia5)
{
struct msn_entry *p = card->msn_list;
__u8 eaz = '0';
while (p) {
if (!strcmp(p->msn, msn)) {
eaz = p->eaz;
break;
}
p = p->next;
}
if (!ia5)
return (1 << (eaz - '0'));
else
return eaz;
}
/*
* Find an EAZ entry in the list.
* return a string with corresponding msn.
*/
char *
act2000_find_eaz(act2000_card *card, char eaz)
{
struct msn_entry *p = card->msn_list;
while (p) {
if (p->eaz == eaz)
return(p->msn);
p = p->next;
}
return("\0");
}
/*
* Add or delete an MSN to the MSN list
*
* First character of msneaz is EAZ, rest is MSN.
* If length of eazmsn is 1, delete that entry.
*/
static int
act2000_set_msn(act2000_card *card, char *eazmsn)
{
struct msn_entry *p = card->msn_list;
struct msn_entry *q = NULL;
unsigned long flags;
int i;
if (!strlen(eazmsn))
return 0;
if (strlen(eazmsn) > 16)
return -EINVAL;
for (i = 0; i < strlen(eazmsn); i++)
if (!isdigit(eazmsn[i]))
return -EINVAL;
if (strlen(eazmsn) == 1) {
/* Delete a single MSN */
while (p) {
if (p->eaz == eazmsn[0]) {
spin_lock_irqsave(&card->lock, flags);
if (q)
q->next = p->next;
else
card->msn_list = p->next;
spin_unlock_irqrestore(&card->lock, flags);
kfree(p);
printk(KERN_DEBUG
"Mapping for EAZ %c deleted\n",
eazmsn[0]);
return 0;
}
q = p;
p = p->next;
}
return 0;
}
/* Add a single MSN */
while (p) {
/* Found in list, replace MSN */
if (p->eaz == eazmsn[0]) {
spin_lock_irqsave(&card->lock, flags);
strcpy(p->msn, &eazmsn[1]);
spin_unlock_irqrestore(&card->lock, flags);
printk(KERN_DEBUG
"Mapping for EAZ %c changed to %s\n",
eazmsn[0],
&eazmsn[1]);
return 0;
}
p = p->next;
}
/* Not found in list, add new entry */
p = kmalloc(sizeof(msn_entry), GFP_KERNEL);
if (!p)
return -ENOMEM;
p->eaz = eazmsn[0];
strcpy(p->msn, &eazmsn[1]);
p->next = card->msn_list;
spin_lock_irqsave(&card->lock, flags);
card->msn_list = p;
spin_unlock_irqrestore(&card->lock, flags);
printk(KERN_DEBUG
"Mapping %c -> %s added\n",
eazmsn[0],
&eazmsn[1]);
return 0;
}
static void
act2000_transmit(struct work_struct *work)
{
struct act2000_card *card =
container_of(work, struct act2000_card, snd_tq);
switch (card->bus) {
case ACT2000_BUS_ISA:
act2000_isa_send(card);
break;
case ACT2000_BUS_PCMCIA:
case ACT2000_BUS_MCA:
default:
printk(KERN_WARNING
"act2000_transmit: Illegal bustype %d\n", card->bus);
}
}
static void
act2000_receive(struct work_struct *work)
{
struct act2000_card *card =
container_of(work, struct act2000_card, poll_tq);
switch (card->bus) {
case ACT2000_BUS_ISA:
act2000_isa_receive(card);
break;
case ACT2000_BUS_PCMCIA:
case ACT2000_BUS_MCA:
default:
printk(KERN_WARNING
"act2000_receive: Illegal bustype %d\n", card->bus);
}
}
static void
act2000_poll(unsigned long data)
{
act2000_card * card = (act2000_card *)data;
unsigned long flags;
act2000_receive(&card->poll_tq);
spin_lock_irqsave(&card->lock, flags);
mod_timer(&card->ptimer, jiffies+3);
spin_unlock_irqrestore(&card->lock, flags);
}
static int
act2000_command(act2000_card * card, isdn_ctrl * c)
{
ulong a;
act2000_chan *chan;
act2000_cdef cdef;
isdn_ctrl cmd;
char tmp[17];
int ret;
unsigned long flags;
void __user *arg;
switch (c->command) {
case ISDN_CMD_IOCTL:
memcpy(&a, c->parm.num, sizeof(ulong));
arg = (void __user *)a;
switch (c->arg) {
case ACT2000_IOCTL_LOADBOOT:
switch (card->bus) {
case ACT2000_BUS_ISA:
ret = act2000_isa_download(card,
arg);
if (!ret) {
card->flags |= ACT2000_FLAGS_LOADED;
if (!(card->flags & ACT2000_FLAGS_IVALID)) {
card->ptimer.expires = jiffies + 3;
card->ptimer.function = act2000_poll;
card->ptimer.data = (unsigned long)card;
add_timer(&card->ptimer);
}
actcapi_manufacturer_req_errh(card);
}
break;
default:
printk(KERN_WARNING
"act2000: Illegal BUS type %d\n",
card->bus);
ret = -EIO;
}
return ret;
case ACT2000_IOCTL_SETPROTO:
card->ptype = a?ISDN_PTYPE_EURO:ISDN_PTYPE_1TR6;
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return 0;
actcapi_manufacturer_req_net(card);
return 0;
case ACT2000_IOCTL_SETMSN:
if (copy_from_user(tmp, arg,
sizeof(tmp)))
return -EFAULT;
if ((ret = act2000_set_msn(card, tmp)))
return ret;
if (card->flags & ACT2000_FLAGS_RUNNING)
return(actcapi_manufacturer_req_msn(card));
return 0;
case ACT2000_IOCTL_ADDCARD:
if (copy_from_user(&cdef, arg,
sizeof(cdef)))
return -EFAULT;
if (act2000_addcard(cdef.bus, cdef.port, cdef.irq, cdef.id))
return -EIO;
return 0;
case ACT2000_IOCTL_TEST:
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
return 0;
default:
return -EINVAL;
}
break;
case ISDN_CMD_DIAL:
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
spin_lock_irqsave(&card->lock, flags);
if (chan->fsm_state != ACT2000_STATE_NULL) {
spin_unlock_irqrestore(&card->lock, flags);
printk(KERN_WARNING "Dial on channel with state %d\n",
chan->fsm_state);
return -EBUSY;
}
if (card->ptype == ISDN_PTYPE_EURO)
tmp[0] = act2000_find_msn(card, c->parm.setup.eazmsn, 1);
else
tmp[0] = c->parm.setup.eazmsn[0];
chan->fsm_state = ACT2000_STATE_OCALL;
chan->callref = 0xffff;
spin_unlock_irqrestore(&card->lock, flags);
ret = actcapi_connect_req(card, chan, c->parm.setup.phone,
tmp[0], c->parm.setup.si1,
c->parm.setup.si2);
if (ret) {
cmd.driver = card->myid;
cmd.command = ISDN_STAT_DHUP;
cmd.arg &= 0x0f;
card->interface.statcallb(&cmd);
}
return ret;
case ISDN_CMD_ACCEPTD:
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
if (chan->fsm_state == ACT2000_STATE_ICALL)
actcapi_select_b2_protocol_req(card, chan);
return 0;
case ISDN_CMD_ACCEPTB:
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
return 0;
case ISDN_CMD_HANGUP:
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
switch (chan->fsm_state) {
case ACT2000_STATE_ICALL:
case ACT2000_STATE_BSETUP:
actcapi_connect_resp(card, chan, 0x15);
break;
case ACT2000_STATE_ACTIVE:
actcapi_disconnect_b3_req(card, chan);
break;
}
return 0;
case ISDN_CMD_SETEAZ:
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
if (strlen(c->parm.num)) {
if (card->ptype == ISDN_PTYPE_EURO) {
chan->eazmask = act2000_find_msn(card, c->parm.num, 0);
}
if (card->ptype == ISDN_PTYPE_1TR6) {
int i;
chan->eazmask = 0;
for (i = 0; i < strlen(c->parm.num); i++)
if (isdigit(c->parm.num[i]))
chan->eazmask |= (1 << (c->parm.num[i] - '0'));
}
} else
chan->eazmask = 0x3ff;
actcapi_listen_req(card);
return 0;
case ISDN_CMD_CLREAZ:
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
chan->eazmask = 0;
actcapi_listen_req(card);
return 0;
case ISDN_CMD_SETL2:
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
chan->l2prot = (c->arg >> 8);
return 0;
case ISDN_CMD_SETL3:
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if ((c->arg >> 8) != ISDN_PROTO_L3_TRANS) {
printk(KERN_WARNING "L3 protocol unknown\n");
return -1;
}
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
chan->l3prot = (c->arg >> 8);
return 0;
}
return -EINVAL;
}
static int
act2000_sendbuf(act2000_card *card, int channel, int ack, struct sk_buff *skb)
{
struct sk_buff *xmit_skb;
int len;
act2000_chan *chan;
actcapi_msg *msg;
if (!(chan = find_channel(card, channel)))
return -1;
if (chan->fsm_state != ACT2000_STATE_ACTIVE)
return -1;
len = skb->len;
if ((chan->queued + len) >= ACT2000_MAX_QUEUED)
return 0;
if (!len)
return 0;
if (skb_headroom(skb) < 19) {
printk(KERN_WARNING "act2000_sendbuf: Headroom only %d\n",
skb_headroom(skb));
xmit_skb = alloc_skb(len + 19, GFP_ATOMIC);
if (!xmit_skb) {
printk(KERN_WARNING "act2000_sendbuf: Out of memory\n");
return 0;
}
skb_reserve(xmit_skb, 19);
skb_copy_from_linear_data(skb, skb_put(xmit_skb, len), len);
} else {
xmit_skb = skb_clone(skb, GFP_ATOMIC);
if (!xmit_skb) {
printk(KERN_WARNING "act2000_sendbuf: Out of memory\n");
return 0;
}
}
dev_kfree_skb(skb);
msg = (actcapi_msg *)skb_push(xmit_skb, 19);
msg->hdr.len = 19 + len;
msg->hdr.applicationID = 1;
msg->hdr.cmd.cmd = 0x86;
msg->hdr.cmd.subcmd = 0x00;
msg->hdr.msgnum = actcapi_nextsmsg(card);
msg->msg.data_b3_req.datalen = len;
msg->msg.data_b3_req.blocknr = (msg->hdr.msgnum & 0xff);
msg->msg.data_b3_req.fakencci = MAKE_NCCI(chan->plci, 0, chan->ncci);
msg->msg.data_b3_req.flags = ack; /* Will be set to 0 on actual sending */
actcapi_debug_msg(xmit_skb, 1);
chan->queued += len;
skb_queue_tail(&card->sndq, xmit_skb);
act2000_schedule_tx(card);
return len;
}
/* Read the Status-replies from the Interface */
static int
act2000_readstatus(u_char __user * buf, int len, act2000_card * card)
{
int count;
u_char __user *p;
for (p = buf, count = 0; count < len; p++, count++) {
if (card->status_buf_read == card->status_buf_write)
return count;
put_user(*card->status_buf_read++, p);
if (card->status_buf_read > card->status_buf_end)
card->status_buf_read = card->status_buf;
}
return count;
}
/*
* Find card with given driverId
*/
static inline act2000_card *
act2000_findcard(int driverid)
{
act2000_card *p = cards;
while (p) {
if (p->myid == driverid)
return p;
p = p->next;
}
return (act2000_card *) 0;
}
/*
* Wrapper functions for interface to linklevel
*/
static int
if_command(isdn_ctrl * c)
{
act2000_card *card = act2000_findcard(c->driver);
if (card)
return (act2000_command(card, c));
printk(KERN_ERR
"act2000: if_command %d called with invalid driverId %d!\n",
c->command, c->driver);
return -ENODEV;
}
static int
if_writecmd(const u_char __user *buf, int len, int id, int channel)
{
act2000_card *card = act2000_findcard(id);
if (card) {
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
return (len);
}
printk(KERN_ERR
"act2000: if_writecmd called with invalid driverId!\n");
return -ENODEV;
}
static int
if_readstatus(u_char __user * buf, int len, int id, int channel)
{
act2000_card *card = act2000_findcard(id);
if (card) {
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
return (act2000_readstatus(buf, len, card));
}
printk(KERN_ERR
"act2000: if_readstatus called with invalid driverId!\n");
return -ENODEV;
}
static int
if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
{
act2000_card *card = act2000_findcard(id);
if (card) {
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
return (act2000_sendbuf(card, channel, ack, skb));
}
printk(KERN_ERR
"act2000: if_sendbuf called with invalid driverId!\n");
return -ENODEV;
}
/*
* Allocate a new card-struct, initialize it
* link it into cards-list.
*/
static void
act2000_alloccard(int bus, int port, int irq, char *id)
{
int i;
act2000_card *card;
if (!(card = kzalloc(sizeof(act2000_card), GFP_KERNEL))) {
printk(KERN_WARNING
"act2000: (%s) Could not allocate card-struct.\n", id);
return;
}
spin_lock_init(&card->lock);
spin_lock_init(&card->mnlock);
skb_queue_head_init(&card->sndq);
skb_queue_head_init(&card->rcvq);
skb_queue_head_init(&card->ackq);
INIT_WORK(&card->snd_tq, act2000_transmit);
INIT_WORK(&card->rcv_tq, actcapi_dispatch);
INIT_WORK(&card->poll_tq, act2000_receive);
init_timer(&card->ptimer);
card->interface.owner = THIS_MODULE;
card->interface.channels = ACT2000_BCH;
card->interface.maxbufsize = 4000;
card->interface.command = if_command;
card->interface.writebuf_skb = if_sendbuf;
card->interface.writecmd = if_writecmd;
card->interface.readstat = if_readstatus;
card->interface.features =
ISDN_FEATURE_L2_X75I |
ISDN_FEATURE_L2_HDLC |
ISDN_FEATURE_L3_TRANS |
ISDN_FEATURE_P_UNKNOWN;
card->interface.hl_hdrlen = 20;
card->ptype = ISDN_PTYPE_EURO;
strlcpy(card->interface.id, id, sizeof(card->interface.id));
for (i=0; i<ACT2000_BCH; i++) {
card->bch[i].plci = 0x8000;
card->bch[i].ncci = 0x8000;
card->bch[i].l2prot = ISDN_PROTO_L2_X75I;
card->bch[i].l3prot = ISDN_PROTO_L3_TRANS;
}
card->myid = -1;
card->bus = bus;
card->port = port;
card->irq = irq;
card->next = cards;
cards = card;
}
/*
* register card at linklevel
*/
static int
act2000_registercard(act2000_card * card)
{
switch (card->bus) {
case ACT2000_BUS_ISA:
break;
case ACT2000_BUS_MCA:
case ACT2000_BUS_PCMCIA:
default:
printk(KERN_WARNING
"act2000: Illegal BUS type %d\n",
card->bus);
return -1;
}
if (!register_isdn(&card->interface)) {
printk(KERN_WARNING
"act2000: Unable to register %s\n",
card->interface.id);
return -1;
}
card->myid = card->interface.channels;
sprintf(card->regname, "act2000-isdn (%s)", card->interface.id);
return 0;
}
static void
unregister_card(act2000_card * card)
{
isdn_ctrl cmd;
cmd.command = ISDN_STAT_UNLOAD;
cmd.driver = card->myid;
card->interface.statcallb(&cmd);
switch (card->bus) {
case ACT2000_BUS_ISA:
act2000_isa_release(card);
break;
case ACT2000_BUS_MCA:
case ACT2000_BUS_PCMCIA:
default:
printk(KERN_WARNING
"act2000: Invalid BUS type %d\n",
card->bus);
break;
}
}
static int
act2000_addcard(int bus, int port, int irq, char *id)
{
act2000_card *p;
act2000_card *q = NULL;
int initialized;
int added = 0;
int failed = 0;
int i;
if (!bus)
bus = ACT2000_BUS_ISA;
if (port != -1) {
/* Port defined, do fixed setup */
act2000_alloccard(bus, port, irq, id);
} else {
/* No port defined, perform autoprobing.
* This may result in more than one card detected.
*/
switch (bus) {
case ACT2000_BUS_ISA:
for (i = 0; i < ARRAY_SIZE(act2000_isa_ports); i++)
if (act2000_isa_detect(act2000_isa_ports[i])) {
printk(KERN_INFO "act2000: Detected "
"ISA card at port 0x%x\n",
act2000_isa_ports[i]);
act2000_alloccard(bus,
act2000_isa_ports[i], irq, id);
}
break;
case ACT2000_BUS_MCA:
case ACT2000_BUS_PCMCIA:
default:
printk(KERN_WARNING
"act2000: addcard: Invalid BUS type %d\n", bus);
}
}
if (!cards)
return 1;
p = cards;
while (p) {
initialized = 0;
if (!p->interface.statcallb) {
/* Not yet registered.
* Try to register and activate it.
*/
added++;
switch (p->bus) {
case ACT2000_BUS_ISA:
if (act2000_isa_detect(p->port)) {
if (act2000_registercard(p))
break;
if (act2000_isa_config_port(p, p->port)) {
printk(KERN_WARNING
"act2000: Could not request port 0x%04x\n",
p->port);
unregister_card(p);
p->interface.statcallb = NULL;
break;
}
if (act2000_isa_config_irq(p, p->irq)) {
printk(KERN_INFO
"act2000: No IRQ available, fallback to polling\n");
/* Fall back to polled operation */
p->irq = 0;
}
printk(KERN_INFO
"act2000: ISA"
"-type card at port "
"0x%04x ",
p->port);
if (p->irq)
printk("irq %d\n", p->irq);
else
printk("polled\n");
initialized = 1;
}
break;
case ACT2000_BUS_MCA:
case ACT2000_BUS_PCMCIA:
default:
printk(KERN_WARNING
"act2000: addcard: Invalid BUS type %d\n",
p->bus);
}
} else
/* Card already initialized */
initialized = 1;
if (initialized) {
/* Init OK, next card ... */
q = p;
p = p->next;
} else {
/* Init failed, remove card from list, free memory */
printk(KERN_WARNING
"act2000: Initialization of %s failed\n",
p->interface.id);
if (q) {
q->next = p->next;
kfree(p);
p = q->next;
} else {
cards = p->next;
kfree(p);
p = cards;
}
failed++;
}
}
return (added - failed);
}
#define DRIVERNAME "IBM Active 2000 ISDN driver"
static int __init act2000_init(void)
{
printk(KERN_INFO "%s\n", DRIVERNAME);
if (!cards)
act2000_addcard(act_bus, act_port, act_irq, act_id);
if (!cards)
printk(KERN_INFO "act2000: No cards defined yet\n");
return 0;
}
static void __exit act2000_exit(void)
{
act2000_card *card = cards;
act2000_card *last;
while (card) {
unregister_card(card);
del_timer(&card->ptimer);
card = card->next;
}
card = cards;
while (card) {
last = card;
card = card->next;
act2000_clear_msn(last);
kfree(last);
}
printk(KERN_INFO "%s unloaded\n", DRIVERNAME);
}
module_init(act2000_init);
module_exit(act2000_exit);
| gpl-2.0 |
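act2000_find_msn() and the ISDN_CMD_SETEAZ handler above share one convention: EAZ digit d occupies bit d of a listen mask, and 0x3ff therefore means "listen on all EAZs 0-9". A standalone illustration of building such a mask (the digits are made up):
#include <stdio.h>
int main(void)
{
	unsigned int eazmask = 0;
	const char *eazs = "159";	/* hypothetical EAZ digits */
	for (const char *p = eazs; *p; p++)
		eazmask |= 1u << (*p - '0');
	printf("eazmask=0x%03x (all=0x3ff)\n", eazmask);	/* prints 0x222 */
	return 0;
}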
varigit/kernel-VAR-SOM-AMxx | drivers/s390/block/scm_drv.c | 4484 | 1786 | /*
* Device driver for s390 storage class memory.
*
* Copyright IBM Corp. 2012
* Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
*/
#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/eadm.h>
#include "scm_blk.h"
static void scm_notify(struct scm_device *scmdev, enum scm_event event)
{
struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
switch (event) {
case SCM_CHANGE:
pr_info("%lx: The capabilities of the SCM increment changed\n",
(unsigned long) scmdev->address);
SCM_LOG(2, "State changed");
SCM_LOG_STATE(2, scmdev);
break;
case SCM_AVAIL:
SCM_LOG(2, "Increment available");
SCM_LOG_STATE(2, scmdev);
scm_blk_set_available(bdev);
break;
}
}
static int scm_probe(struct scm_device *scmdev)
{
struct scm_blk_dev *bdev;
int ret;
SCM_LOG(2, "probe");
SCM_LOG_STATE(2, scmdev);
if (scmdev->attrs.oper_state != OP_STATE_GOOD)
return -EINVAL;
bdev = kzalloc(sizeof(*bdev), GFP_KERNEL);
if (!bdev)
return -ENOMEM;
dev_set_drvdata(&scmdev->dev, bdev);
ret = scm_blk_dev_setup(bdev, scmdev);
if (ret) {
dev_set_drvdata(&scmdev->dev, NULL);
kfree(bdev);
goto out;
}
out:
return ret;
}
static int scm_remove(struct scm_device *scmdev)
{
struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
scm_blk_dev_cleanup(bdev);
dev_set_drvdata(&scmdev->dev, NULL);
kfree(bdev);
return 0;
}
static struct scm_driver scm_drv = {
.drv = {
.name = "scm_block",
.owner = THIS_MODULE,
},
.notify = scm_notify,
.probe = scm_probe,
.remove = scm_remove,
.handler = scm_blk_irq,
};
int __init scm_drv_init(void)
{
return scm_driver_register(&scm_drv);
}
void scm_drv_cleanup(void)
{
scm_driver_unregister(&scm_drv);
}
| gpl-2.0 |
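scm_probe() and scm_remove() above follow the usual pattern: allocate per-device state in probe, park it in drvdata, and free it in remove, with probe undoing its own work on failure. The same lifecycle reduced to plain C — every name here is a stand-in, none of it is kernel API:
#include <stdlib.h>
struct fake_dev { void *drvdata; };
struct fake_state { int ready; };
static int fake_probe(struct fake_dev *dev)
{
	struct fake_state *st = calloc(1, sizeof(*st));	/* kzalloc() analogue */
	if (!st)
		return -1;	/* -ENOMEM in the kernel version */
	dev->drvdata = st;	/* dev_set_drvdata() analogue */
	return 0;
}
static void fake_remove(struct fake_dev *dev)
{
	free(dev->drvdata);	/* tear down exactly what probe allocated */
	dev->drvdata = NULL;
}
int main(void)
{
	struct fake_dev d = { 0 };
	if (fake_probe(&d) == 0)
		fake_remove(&d);
	return 0;
}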
geoffret/litmus-rt | drivers/char/pc8736x_gpio.c | 4484 | 8975 | /* linux/drivers/char/pc8736x_gpio.c
National Semiconductor PC8736x GPIO driver. Allows a user space
process to play with the GPIO pins.
Copyright (c) 2005,2006 Jim Cromie <jim.cromie@gmail.com>
adapted from linux/drivers/char/scx200_gpio.c
Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>,
*/
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/cdev.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/mutex.h>
#include <linux/nsc_gpio.h>
#include <linux/platform_device.h>
#include <asm/uaccess.h>
#define DEVNAME "pc8736x_gpio"
MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
MODULE_DESCRIPTION("NatSemi/Winbond PC-8736x GPIO Pin Driver");
MODULE_LICENSE("GPL");
static int major; /* default to dynamic major */
module_param(major, int, 0);
MODULE_PARM_DESC(major, "Major device number");
static DEFINE_MUTEX(pc8736x_gpio_config_lock);
static unsigned pc8736x_gpio_base;
static u8 pc8736x_gpio_shadow[4];
#define SIO_BASE1 0x2E /* 1st command-reg to check */
#define SIO_BASE2 0x4E /* alt command-reg to check */
#define SIO_SID 0x20 /* SuperI/O ID Register */
#define SIO_SID_PC87365 0xe5 /* Expected value in ID Register for PC87365 */
#define SIO_SID_PC87366 0xe9 /* Expected value in ID Register for PC87366 */
#define SIO_CF1 0x21 /* chip config, bit0 is chip enable */
#define PC8736X_GPIO_RANGE 16 /* ioaddr range */
#define PC8736X_GPIO_CT 32 /* minors matching 4 8 bit ports */
#define SIO_UNIT_SEL 0x7 /* unit select reg */
#define SIO_UNIT_ACT 0x30 /* unit enable */
#define SIO_GPIO_UNIT 0x7 /* unit number of GPIO */
#define SIO_VLM_UNIT 0x0D
#define SIO_TMS_UNIT 0x0E
/* config-space addrs to read/write each unit's runtime addr */
#define SIO_BASE_HADDR 0x60
#define SIO_BASE_LADDR 0x61
/* GPIO config-space pin-control addresses */
#define SIO_GPIO_PIN_SELECT 0xF0
#define SIO_GPIO_PIN_CONFIG 0xF1
#define SIO_GPIO_PIN_EVENT 0xF2
static unsigned char superio_cmd = 0;
static unsigned char selected_device = 0xFF; /* bogus start val */
/* GPIO port runtime access, functionality */
static int port_offset[] = { 0, 4, 8, 10 }; /* non-uniform offsets ! */
/* static int event_capable[] = { 1, 1, 0, 0 }; ports 2,3 are hobbled */
#define PORT_OUT 0
#define PORT_IN 1
#define PORT_EVT_EN 2
#define PORT_EVT_STST 3
static struct platform_device *pdev; /* use in dev_*() */
static inline void superio_outb(int addr, int val)
{
outb_p(addr, superio_cmd);
outb_p(val, superio_cmd + 1);
}
static inline int superio_inb(int addr)
{
outb_p(addr, superio_cmd);
return inb_p(superio_cmd + 1);
}
static int pc8736x_superio_present(void)
{
int id;
/* try the 2 possible values, read a hardware reg to verify */
superio_cmd = SIO_BASE1;
id = superio_inb(SIO_SID);
if (id == SIO_SID_PC87365 || id == SIO_SID_PC87366)
return superio_cmd;
superio_cmd = SIO_BASE2;
id = superio_inb(SIO_SID);
if (id == SIO_SID_PC87365 || id == SIO_SID_PC87366)
return superio_cmd;
return 0;
}
static void device_select(unsigned devldn)
{
superio_outb(SIO_UNIT_SEL, devldn);
selected_device = devldn;
}
static void select_pin(unsigned iminor)
{
/* select GPIO port/pin from device minor number */
device_select(SIO_GPIO_UNIT);
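/* minor = port * 8 + bit, so (iminor << 1) & 0xF0 lands the port number in the high nibble while (iminor & 0x7) keeps the pin. */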
superio_outb(SIO_GPIO_PIN_SELECT,
((iminor << 1) & 0xF0) | (iminor & 0x7));
}
static inline u32 pc8736x_gpio_configure_fn(unsigned index, u32 mask, u32 bits,
u32 func_slct)
{
u32 config, new_config;
mutex_lock(&pc8736x_gpio_config_lock);
device_select(SIO_GPIO_UNIT);
select_pin(index);
/* read current config value */
config = superio_inb(func_slct);
/* set new config */
new_config = (config & mask) | bits;
superio_outb(func_slct, new_config);
mutex_unlock(&pc8736x_gpio_config_lock);
return config;
}
static u32 pc8736x_gpio_configure(unsigned index, u32 mask, u32 bits)
{
return pc8736x_gpio_configure_fn(index, mask, bits,
SIO_GPIO_PIN_CONFIG);
}
static int pc8736x_gpio_get(unsigned minor)
{
int port, bit, val;
port = minor >> 3;
bit = minor & 7;
val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN);
val >>= bit;
val &= 1;
dev_dbg(&pdev->dev, "_gpio_get(%d from %x bit %d) == val %d\n",
minor, pc8736x_gpio_base + port_offset[port] + PORT_IN, bit,
val);
return val;
}
static void pc8736x_gpio_set(unsigned minor, int val)
{
int port, bit, curval;
minor &= 0x1f;
port = minor >> 3;
bit = minor & 7;
curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT);
dev_dbg(&pdev->dev, "addr:%x cur:%x bit-pos:%d cur-bit:%x + new:%d -> bit-new:%d\n",
pc8736x_gpio_base + port_offset[port] + PORT_OUT,
curval, bit, (curval & ~(1 << bit)), val, (val << bit));
val = (curval & ~(1 << bit)) | (val << bit);
dev_dbg(&pdev->dev, "gpio_set(minor:%d port:%d bit:%d)"
" %2x -> %2x\n", minor, port, bit, curval, val);
outb_p(val, pc8736x_gpio_base + port_offset[port] + PORT_OUT);
curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT);
val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN);
dev_dbg(&pdev->dev, "wrote %x, read: %x\n", curval, val);
pc8736x_gpio_shadow[port] = val;
}
static int pc8736x_gpio_current(unsigned minor)
{
int port, bit;
minor &= 0x1f;
port = minor >> 3;
bit = minor & 7;
return ((pc8736x_gpio_shadow[port] >> bit) & 0x01);
}
static void pc8736x_gpio_change(unsigned index)
{
pc8736x_gpio_set(index, !pc8736x_gpio_current(index));
}
static struct nsc_gpio_ops pc8736x_gpio_ops = {
.owner = THIS_MODULE,
.gpio_config = pc8736x_gpio_configure,
.gpio_dump = nsc_gpio_dump,
.gpio_get = pc8736x_gpio_get,
.gpio_set = pc8736x_gpio_set,
.gpio_change = pc8736x_gpio_change,
.gpio_current = pc8736x_gpio_current
};
static int pc8736x_gpio_open(struct inode *inode, struct file *file)
{
unsigned m = iminor(inode);
file->private_data = &pc8736x_gpio_ops;
dev_dbg(&pdev->dev, "open %d\n", m);
if (m >= PC8736X_GPIO_CT)
return -EINVAL;
return nonseekable_open(inode, file);
}
static const struct file_operations pc8736x_gpio_fileops = {
.owner = THIS_MODULE,
.open = pc8736x_gpio_open,
.write = nsc_gpio_write,
.read = nsc_gpio_read,
.llseek = no_llseek,
};
static void __init pc8736x_init_shadow(void)
{
int port;
/* read the current values driven on the GPIO signals */
for (port = 0; port < 4; ++port)
pc8736x_gpio_shadow[port]
= inb_p(pc8736x_gpio_base + port_offset[port]
+ PORT_OUT);
}
static struct cdev pc8736x_gpio_cdev;
static int __init pc8736x_gpio_init(void)
{
int rc;
dev_t devid;
pdev = platform_device_alloc(DEVNAME, 0);
if (!pdev)
return -ENOMEM;
rc = platform_device_add(pdev);
if (rc) {
rc = -ENODEV;
goto undo_platform_dev_alloc;
}
dev_info(&pdev->dev, "NatSemi pc8736x GPIO Driver Initializing\n");
if (!pc8736x_superio_present()) {
rc = -ENODEV;
dev_err(&pdev->dev, "no device found\n");
goto undo_platform_dev_add;
}
pc8736x_gpio_ops.dev = &pdev->dev;
/* Verify that chip and it's GPIO unit are both enabled.
My BIOS does this, so I take minimum action here
*/
rc = superio_inb(SIO_CF1);
if (!(rc & 0x01)) {
rc = -ENODEV;
dev_err(&pdev->dev, "device not enabled\n");
goto undo_platform_dev_add;
}
device_select(SIO_GPIO_UNIT);
if (!superio_inb(SIO_UNIT_ACT)) {
rc = -ENODEV;
dev_err(&pdev->dev, "GPIO unit not enabled\n");
goto undo_platform_dev_add;
}
/* read the GPIO unit base addr that chip responds to */
pc8736x_gpio_base = (superio_inb(SIO_BASE_HADDR) << 8
| superio_inb(SIO_BASE_LADDR));
if (!request_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE, DEVNAME)) {
rc = -ENODEV;
dev_err(&pdev->dev, "GPIO ioport %x busy\n",
pc8736x_gpio_base);
goto undo_platform_dev_add;
}
dev_info(&pdev->dev, "GPIO ioport %x reserved\n", pc8736x_gpio_base);
if (major) {
devid = MKDEV(major, 0);
rc = register_chrdev_region(devid, PC8736X_GPIO_CT, DEVNAME);
} else {
rc = alloc_chrdev_region(&devid, 0, PC8736X_GPIO_CT, DEVNAME);
major = MAJOR(devid);
}
if (rc < 0) {
dev_err(&pdev->dev, "register-chrdev failed: %d\n", rc);
goto undo_request_region;
}
if (!major) {
major = rc;
dev_dbg(&pdev->dev, "got dynamic major %d\n", major);
}
pc8736x_init_shadow();
/* ignore minor errs, and succeed */
cdev_init(&pc8736x_gpio_cdev, &pc8736x_gpio_fileops);
cdev_add(&pc8736x_gpio_cdev, devid, PC8736X_GPIO_CT);
return 0;
undo_request_region:
release_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE);
undo_platform_dev_add:
platform_device_del(pdev);
undo_platform_dev_alloc:
platform_device_put(pdev);
return rc;
}
static void __exit pc8736x_gpio_cleanup(void)
{
dev_dbg(&pdev->dev, "cleanup\n");
cdev_del(&pc8736x_gpio_cdev);
unregister_chrdev_region(MKDEV(major,0), PC8736X_GPIO_CT);
release_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE);
platform_device_unregister(pdev);
}
module_init(pc8736x_gpio_init);
module_exit(pc8736x_gpio_cleanup);
| gpl-2.0 |
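pc8736x_gpio_get(), _set() and _current() above all derive the port and bit from the device minor the same way, then index the non-uniform port_offset[] table. A quick userspace check (the minor value is arbitrary):
#include <stdio.h>
static const int port_offset[] = { 0, 4, 8, 10 };	/* as in the driver */
int main(void)
{
	unsigned minor = 27;	/* hypothetical GPIO minor */
	int port = minor >> 3;	/* 27 -> port 3 */
	int bit = minor & 7;	/* 27 -> bit 3 */
	printf("minor %u -> port %d (offset 0x%x) bit %d\n",
	       minor, port, port_offset[port], bit);
	return 0;
}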
TheNameIsNigel/android_kernel_carbon_msm8928 | arch/arm/mach-ixp4xx/coyote-setup.c | 4996 | 3397 | /*
* arch/arm/mach-ixp4xx/coyote-setup.c
*
* Board setup for ADI Engineering and IXDGP425 boards
*
* Copyright (C) 2003-2005 MontaVista Software, Inc.
*
* Author: Deepak Saxena <dsaxena@plexity.net>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_8250.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#define COYOTE_IDE_BASE_PHYS IXP4XX_EXP_BUS_BASE(3)
#define COYOTE_IDE_BASE_VIRT 0xFFFE1000
#define COYOTE_IDE_REGION_SIZE 0x1000
#define COYOTE_IDE_DATA_PORT 0xFFFE10E0
#define COYOTE_IDE_CTRL_PORT 0xFFFE10FC
#define COYOTE_IDE_ERROR_PORT 0xFFFE10E2
#define IRQ_COYOTE_IDE IRQ_IXP4XX_GPIO5
static struct flash_platform_data coyote_flash_data = {
.map_name = "cfi_probe",
.width = 2,
};
static struct resource coyote_flash_resource = {
.flags = IORESOURCE_MEM,
};
static struct platform_device coyote_flash = {
.name = "IXP4XX-Flash",
.id = 0,
.dev = {
.platform_data = &coyote_flash_data,
},
.num_resources = 1,
.resource = &coyote_flash_resource,
};
static struct resource coyote_uart_resource = {
.start = IXP4XX_UART2_BASE_PHYS,
.end = IXP4XX_UART2_BASE_PHYS + 0x0fff,
.flags = IORESOURCE_MEM,
};
static struct plat_serial8250_port coyote_uart_data[] = {
{
.mapbase = IXP4XX_UART2_BASE_PHYS,
.membase = (char *)IXP4XX_UART2_BASE_VIRT + REG_OFFSET,
.irq = IRQ_IXP4XX_UART2,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
.iotype = UPIO_MEM,
.regshift = 2,
.uartclk = IXP4XX_UART_XTAL,
},
{ },
};
static struct platform_device coyote_uart = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = coyote_uart_data,
},
.num_resources = 1,
.resource = &coyote_uart_resource,
};
static struct platform_device *coyote_devices[] __initdata = {
&coyote_flash,
&coyote_uart
};
static void __init coyote_init(void)
{
ixp4xx_sys_init();
coyote_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
coyote_flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + SZ_32M - 1;
*IXP4XX_EXP_CS0 |= IXP4XX_FLASH_WRITABLE;
*IXP4XX_EXP_CS1 = *IXP4XX_EXP_CS0;
if (machine_is_ixdpg425()) {
coyote_uart_data[0].membase =
(char*)(IXP4XX_UART1_BASE_VIRT + REG_OFFSET);
coyote_uart_data[0].mapbase = IXP4XX_UART1_BASE_PHYS;
coyote_uart_data[0].irq = IRQ_IXP4XX_UART1;
}
platform_add_devices(coyote_devices, ARRAY_SIZE(coyote_devices));
}
#ifdef CONFIG_ARCH_ADI_COYOTE
MACHINE_START(ADI_COYOTE, "ADI Engineering Coyote")
/* Maintainer: MontaVista Software, Inc. */
.map_io = ixp4xx_map_io,
.init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.atag_offset = 0x100,
.init_machine = coyote_init,
#if defined(CONFIG_PCI)
.dma_zone_size = SZ_64M,
#endif
.restart = ixp4xx_restart,
MACHINE_END
#endif
/*
* IXDPG425 is identical to Coyote except for which serial port
* is connected.
*/
#ifdef CONFIG_MACH_IXDPG425
MACHINE_START(IXDPG425, "Intel IXDPG425")
/* Maintainer: MontaVista Software, Inc. */
.map_io = ixp4xx_map_io,
.init_early = ixp4xx_init_early,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.atag_offset = 0x100,
.init_machine = coyote_init,
.restart = ixp4xx_restart,
MACHINE_END
#endif
| gpl-2.0 |
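For the memory-mapped 8250 above, .regshift = 2 with .iotype = UPIO_MEM means UART register N is accessed at membase + (N << 2). A tiny standalone illustration of that addressing (the base address is invented):
#include <stdio.h>
int main(void)
{
	unsigned long membase = 0xc8001000;	/* hypothetical mapping */
	int regshift = 2;			/* matches .regshift above */
	for (int reg = 0; reg < 3; reg++)
		printf("reg %d -> %#lx\n", reg,
		       membase + ((unsigned long)reg << regshift));
	return 0;
}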
jawad6233/bindu-kernel-base | arch/arm/mach-rpc/riscpc.c | 4996 | 4823 | /*
* linux/arch/arm/mach-rpc/riscpc.c
*
* Copyright (C) 1998-2001 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Architecture specific fixups.
*/
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/serial_8250.h>
#include <linux/ata_platform.h>
#include <linux/io.h>
#include <linux/i2c.h>
#include <asm/elf.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <asm/hardware/iomd.h>
#include <asm/page.h>
#include <asm/domain.h>
#include <asm/setup.h>
#include <asm/system_misc.h>
#include <asm/mach/map.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
extern void rpc_init_irq(void);
unsigned int vram_size;
unsigned int memc_ctrl_reg;
unsigned int number_mfm_drives;
static int __init parse_tag_acorn(const struct tag *tag)
{
memc_ctrl_reg = tag->u.acorn.memc_control_reg;
number_mfm_drives = tag->u.acorn.adfsdrives;
switch (tag->u.acorn.vram_pages) {
case 512:
vram_size += PAGE_SIZE * 256;
case 256:
vram_size += PAGE_SIZE * 256;
default:
break;
}
#if 0
if (vram_size) {
desc->video_start = 0x02000000;
desc->video_end = 0x02000000 + vram_size;
}
#endif
return 0;
}
__tagtable(ATAG_ACORN, parse_tag_acorn);
static struct map_desc rpc_io_desc[] __initdata = {
{ /* VRAM */
.virtual = SCREEN_BASE,
.pfn = __phys_to_pfn(SCREEN_START),
.length = 2*1048576,
.type = MT_DEVICE
}, { /* IO space */
.virtual = (u32)IO_BASE,
.pfn = __phys_to_pfn(IO_START),
.length = IO_SIZE,
.type = MT_DEVICE
}, { /* EASI space */
.virtual = (unsigned long)EASI_BASE,
.pfn = __phys_to_pfn(EASI_START),
.length = EASI_SIZE,
.type = MT_DEVICE
}
};
static void __init rpc_map_io(void)
{
iotable_init(rpc_io_desc, ARRAY_SIZE(rpc_io_desc));
/*
* Turn off floppy.
*/
writeb(0xc, PCIO_BASE + (0x3f2 << 2));
/*
* RiscPC can't handle half-word loads and stores
*/
elf_hwcap &= ~HWCAP_HALF;
}
static struct resource acornfb_resources[] = {
/* VIDC */
DEFINE_RES_MEM(0x03400000, 0x00200000),
DEFINE_RES_IRQ(IRQ_VSYNCPULSE),
};
static struct platform_device acornfb_device = {
.name = "acornfb",
.id = -1,
.dev = {
.coherent_dma_mask = 0xffffffff,
},
.num_resources = ARRAY_SIZE(acornfb_resources),
.resource = acornfb_resources,
};
static struct resource iomd_resources[] = {
DEFINE_RES_MEM(0x03200000, 0x10000),
};
static struct platform_device iomd_device = {
.name = "iomd",
.id = -1,
.num_resources = ARRAY_SIZE(iomd_resources),
.resource = iomd_resources,
};
static struct resource iomd_kart_resources[] = {
DEFINE_RES_IRQ(IRQ_KEYBOARDRX),
DEFINE_RES_IRQ(IRQ_KEYBOARDTX),
};
static struct platform_device kbd_device = {
.name = "kart",
.id = -1,
.dev = {
.parent = &iomd_device.dev,
},
.num_resources = ARRAY_SIZE(iomd_kart_resources),
.resource = iomd_kart_resources,
};
static struct plat_serial8250_port serial_platform_data[] = {
{
.mapbase = 0x03010fe0,
.irq = IRQ_SERIALPORT,
.uartclk = 1843200,
.regshift = 2,
.iotype = UPIO_MEM,
.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_SKIP_TEST,
},
{ },
};
static struct platform_device serial_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = serial_platform_data,
},
};
static struct pata_platform_info pata_platform_data = {
.ioport_shift = 2,
};
static struct resource pata_resources[] = {
DEFINE_RES_MEM(0x030107c0, 0x20),
DEFINE_RES_MEM(0x03010fd8, 0x04),
DEFINE_RES_IRQ(IRQ_HARDDISK),
};
static struct platform_device pata_device = {
.name = "pata_platform",
.id = -1,
.num_resources = ARRAY_SIZE(pata_resources),
.resource = pata_resources,
.dev = {
.platform_data = &pata_platform_data,
.coherent_dma_mask = ~0, /* grumble */
},
};
static struct platform_device *devs[] __initdata = {
&iomd_device,
&kbd_device,
&serial_device,
&acornfb_device,
&pata_device,
};
static struct i2c_board_info i2c_rtc = {
I2C_BOARD_INFO("pcf8583", 0x50)
};
static int __init rpc_init(void)
{
i2c_register_board_info(0, &i2c_rtc, 1);
return platform_add_devices(devs, ARRAY_SIZE(devs));
}
arch_initcall(rpc_init);
static void rpc_restart(char mode, const char *cmd)
{
iomd_writeb(0, IOMD_ROMCR0);
/*
* Jump into the ROM
*/
soft_restart(0);
}
extern struct sys_timer ioc_timer;
MACHINE_START(RISCPC, "Acorn-RiscPC")
/* Maintainer: Russell King */
.atag_offset = 0x100,
.reserve_lp0 = 1,
.reserve_lp1 = 1,
.map_io = rpc_map_io,
.init_irq = rpc_init_irq,
.timer = &ioc_timer,
.restart = rpc_restart,
MACHINE_END
| gpl-2.0 |
ryrzy/yoda-kernel-i9300-JB | lib/halfmd4.c | 5252 | 2028 | #include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cryptohash.h>
/* F, G and H are basic MD4 functions: selection, majority, parity */
#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
/*
* The generic round function. The application is so specific that
* we don't bother protecting all the arguments with parens, as is generally
* good macro practice, in favor of extra legibility.
* Rotation is separate from addition to prevent recomputation
*/
#define ROUND(f, a, b, c, d, x, s) \
(a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s)))
#define K1 0
#define K2 013240474631UL
#define K3 015666365641UL
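/* Note: K2 and K3 are the classic MD4 round constants, 0x5a827999
* (2^30 * sqrt(2)) and 0x6ed9eba1 (2^30 * sqrt(3)), written above
* in octal.
*/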
/*
* Basic cut-down MD4 transform. Returns only 32 bits of result.
*/
__u32 half_md4_transform(__u32 buf[4], __u32 const in[8])
{
__u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
/* Round 1 */
ROUND(F, a, b, c, d, in[0] + K1, 3);
ROUND(F, d, a, b, c, in[1] + K1, 7);
ROUND(F, c, d, a, b, in[2] + K1, 11);
ROUND(F, b, c, d, a, in[3] + K1, 19);
ROUND(F, a, b, c, d, in[4] + K1, 3);
ROUND(F, d, a, b, c, in[5] + K1, 7);
ROUND(F, c, d, a, b, in[6] + K1, 11);
ROUND(F, b, c, d, a, in[7] + K1, 19);
/* Round 2 */
ROUND(G, a, b, c, d, in[1] + K2, 3);
ROUND(G, d, a, b, c, in[3] + K2, 5);
ROUND(G, c, d, a, b, in[5] + K2, 9);
ROUND(G, b, c, d, a, in[7] + K2, 13);
ROUND(G, a, b, c, d, in[0] + K2, 3);
ROUND(G, d, a, b, c, in[2] + K2, 5);
ROUND(G, c, d, a, b, in[4] + K2, 9);
ROUND(G, b, c, d, a, in[6] + K2, 13);
/* Round 3 */
ROUND(H, a, b, c, d, in[3] + K3, 3);
ROUND(H, d, a, b, c, in[7] + K3, 9);
ROUND(H, c, d, a, b, in[2] + K3, 11);
ROUND(H, b, c, d, a, in[6] + K3, 15);
ROUND(H, a, b, c, d, in[1] + K3, 3);
ROUND(H, d, a, b, c, in[5] + K3, 9);
ROUND(H, c, d, a, b, in[0] + K3, 11);
ROUND(H, b, c, d, a, in[4] + K3, 15);
buf[0] += a;
buf[1] += b;
buf[2] += c;
buf[3] += d;
return buf[1]; /* "most hashed" word */
}
EXPORT_SYMBOL(half_md4_transform);
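/* Usage sketch (illustrative only, not part of this file): callers such
* as the ext3/ext4 directory-index hash seed buf[] with the standard MD4
* initialisation constants and feed one 32-byte block at a time through
* the transform:
*
*	__u32 buf[4] = { 0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476 };
*	__u32 in[8];
*	memcpy(in, data, sizeof(in));
*	hash = half_md4_transform(buf, in);
*/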
| gpl-2.0 |
Dm47021/android_kernel_samsung_centura_sch738c | net/sched/sch_htb.c | 5252 | 42536 | /*
* net/sched/sch_htb.c Hierarchical token bucket, feed tree version
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Martin Devera, <devik@cdi.cz>
*
* Credits (in time order) for older HTB versions:
* Stef Coene <stef.coene@docum.org>
* HTB support at LARTC mailing list
* Ondrej Kraus, <krauso@barr.cz>
* found missing INIT_QDISC(htb)
* Vladimir Smelhaus, Aamer Akhter, Bert Hubert
* helped a lot to locate nasty class stall bug
* Andi Kleen, Jamal Hadi, Bert Hubert
* code review and helpful comments on shaping
* Tomasz Wrona, <tw@eter.tym.pl>
* created test case so that I was able to fix nasty bug
* Wilfried Weissmann
* spotted bug in dequeue code and helped with fix
* Jiri Fojtasek
* fixed requeue routine
* and many others. thanks.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
/* HTB algorithm.
Author: devik@cdi.cz
========================================================================
HTB is like TBF with multiple classes. It is also similar to CBQ because
it allows assigning priority to each class in the hierarchy.
In fact it is another implementation of Floyd's formal sharing.
Levels:
Each class is assigned a level. A leaf ALWAYS has level 0 and root
classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
one less than their parent.
*/
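/* Illustrative sketch (classids assumed, not from the original text):
with TC_HTB_MAXDEPTH of 8, a chain such as
root 1:1 (level 7) -> inner 1:10 (level 6) -> leaf 1:101 (level 0)
follows the rule above: leaves are always level 0, root classes sit at
TC_HTB_MAXDEPTH-1, and interior nodes sit one level below their parent
(see how parent->level is computed in htb_change_class() below).
*/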
static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011 /* major must be matched with number supplied by TC as version */
#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif
/* Module parameter and sysfs export */
module_param (htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
/* used internally to keep status of single class */
enum htb_cmode {
HTB_CANT_SEND, /* class can't send and can't borrow */
HTB_MAY_BORROW, /* class can't send but may borrow */
HTB_CAN_SEND /* class can send */
};
/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class {
struct Qdisc_class_common common;
/* general class parameters */
struct gnet_stats_basic_packed bstats;
struct gnet_stats_queue qstats;
struct gnet_stats_rate_est rate_est;
struct tc_htb_xstats xstats; /* our special stats */
int refcnt; /* usage count of this class */
/* topology */
int level; /* our level (see above) */
unsigned int children;
struct htb_class *parent; /* parent class */
int prio; /* these two are used only by leaves... */
int quantum; /* but stored for parent-to-leaf return */
union {
struct htb_class_leaf {
struct Qdisc *q;
int deficit[TC_HTB_MAXDEPTH];
struct list_head drop_list;
} leaf;
struct htb_class_inner {
struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */
struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
/* When a class changes from state 1->2 and disconnects from
* its parent's feed, we lose the ptr value and start from the
* first child again. Here we store the classid of the
* last valid ptr (used when ptr is NULL).
*/
u32 last_ptr_id[TC_HTB_NUMPRIO];
} inner;
} un;
struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
struct rb_node pq_node; /* node for event queue */
psched_time_t pq_key;
int prio_activity; /* for which prios are we active */
enum htb_cmode cmode; /* current mode of the class */
/* class attached filters */
struct tcf_proto *filter_list;
int filter_cnt;
/* token bucket parameters */
struct qdisc_rate_table *rate; /* rate table of the class itself */
struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */
long buffer, cbuffer; /* token bucket depth/rate */
psched_tdiff_t mbuffer; /* max wait time */
long tokens, ctokens; /* current number of tokens */
psched_time_t t_c; /* checkpoint time */
};
struct htb_sched {
struct Qdisc_class_hash clhash;
struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
/* self list - roots of self generating tree */
struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
int row_mask[TC_HTB_MAXDEPTH];
struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
/* self wait list - roots of wait PQs per row */
struct rb_root wait_pq[TC_HTB_MAXDEPTH];
/* time of nearest event per level (row) */
psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];
int defcls; /* class where unclassified flows go to */
/* filters for qdisc itself */
struct tcf_proto *filter_list;
int rate2quantum; /* quant = rate / rate2quantum */
psched_time_t now; /* cached dequeue time */
struct qdisc_watchdog watchdog;
/* non shaped skbs; let them go directly thru */
struct sk_buff_head direct_queue;
int direct_qlen; /* max qlen of above */
long direct_pkts;
#define HTB_WARN_TOOMANYEVENTS 0x1
unsigned int warned; /* only one warning */
struct work_struct work;
};
/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
struct htb_sched *q = qdisc_priv(sch);
struct Qdisc_class_common *clc;
clc = qdisc_class_find(&q->clhash, handle);
if (clc == NULL)
return NULL;
return container_of(clc, struct htb_class, common);
}
/**
* htb_classify - classify a packet into a class
*
* It returns NULL if the packet should be dropped or -1 if the packet
* should be passed directly thru. In all other cases a leaf class is returned.
* We allow direct class selection by classid in priority. Then we examine
* filters in the qdisc and in inner nodes (if a higher filter points to the
* inner node). If we end up with classid MAJOR:0 we enqueue the skb into the
* special internal fifo (direct). These packets then go directly thru. If we
* still have no valid leaf we try to use the MAJOR:default leaf. If still
* unsuccessful, we finish and return the direct queue.
*/
#define HTB_DIRECT ((struct htb_class *)-1L)
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
int *qerr)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl;
struct tcf_result res;
struct tcf_proto *tcf;
int result;
/* allow to select class by setting skb->priority to valid classid;
* note that nfmark can be used too by attaching filter fw with no
* rules in it
*/
if (skb->priority == sch->handle)
return HTB_DIRECT; /* X:0 (direct flow) selected */
cl = htb_find(skb->priority, sch);
if (cl && cl->level == 0)
return cl;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
tcf = q->filter_list;
while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return NULL;
}
#endif
cl = (void *)res.class;
if (!cl) {
if (res.classid == sch->handle)
return HTB_DIRECT; /* X:0 (direct flow) */
cl = htb_find(res.classid, sch);
if (!cl)
break; /* filter selected invalid classid */
}
if (!cl->level)
return cl; /* we hit leaf; return it */
/* we have got inner class; apply inner filter chain */
tcf = cl->filter_list;
}
/* classification failed; try to use default class */
cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
if (!cl || cl->level)
return HTB_DIRECT; /* bad default .. this is safe bet */
return cl;
}
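/* Example (tc invocation assumed, for illustration only): after
* "tc qdisc add dev eth0 root handle 1: htb default 30", a packet with
* skb->priority == 0x10000 (classid 1:0) is treated as a direct flow
* and HTB_DIRECT is returned; an unclassified packet falls back to
* q->defcls and lands in leaf 1:30 if that class exists (tc parses the
* default minor as hex).
*/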
/**
* htb_add_to_id_tree - adds class to the round robin list
*
* Routine adds class to the list (actually tree) sorted by classid.
* Make sure that class is not already on such list for given prio.
*/
static void htb_add_to_id_tree(struct rb_root *root,
struct htb_class *cl, int prio)
{
struct rb_node **p = &root->rb_node, *parent = NULL;
while (*p) {
struct htb_class *c;
parent = *p;
c = rb_entry(parent, struct htb_class, node[prio]);
if (cl->common.classid > c->common.classid)
p = &parent->rb_right;
else
p = &parent->rb_left;
}
rb_link_node(&cl->node[prio], parent, p);
rb_insert_color(&cl->node[prio], root);
}
/**
* htb_add_to_wait_tree - adds class to the event queue with delay
*
* The class is added to priority event queue to indicate that class will
* change its mode in cl->pq_key microseconds. Make sure that class is not
* already in the queue.
*/
static void htb_add_to_wait_tree(struct htb_sched *q,
struct htb_class *cl, long delay)
{
struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
cl->pq_key = q->now + delay;
if (cl->pq_key == q->now)
cl->pq_key++;
/* update the nearest event cache */
if (q->near_ev_cache[cl->level] > cl->pq_key)
q->near_ev_cache[cl->level] = cl->pq_key;
while (*p) {
struct htb_class *c;
parent = *p;
c = rb_entry(parent, struct htb_class, pq_node);
if (cl->pq_key >= c->pq_key)
p = &parent->rb_right;
else
p = &parent->rb_left;
}
rb_link_node(&cl->pq_node, parent, p);
rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}
/**
* htb_next_rb_node - finds next node in binary tree
*
* When we are past last key we return NULL.
* Average complexity is 2 steps per call.
*/
static inline void htb_next_rb_node(struct rb_node **n)
{
*n = rb_next(*n);
}
/**
* htb_add_class_to_row - add class to its row
*
* The class is added to row at priorities marked in mask.
* It does nothing if mask == 0.
*/
static inline void htb_add_class_to_row(struct htb_sched *q,
struct htb_class *cl, int mask)
{
q->row_mask[cl->level] |= mask;
while (mask) {
int prio = ffz(~mask);
mask &= ~(1 << prio);
htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
}
}
/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
if (RB_EMPTY_NODE(rb)) {
WARN_ON(1);
} else {
rb_erase(rb, root);
RB_CLEAR_NODE(rb);
}
}
/**
* htb_remove_class_from_row - removes class from its row
*
* The class is removed from row at priorities marked in mask.
* It does nothing if mask == 0.
*/
static inline void htb_remove_class_from_row(struct htb_sched *q,
struct htb_class *cl, int mask)
{
int m = 0;
while (mask) {
int prio = ffz(~mask);
mask &= ~(1 << prio);
if (q->ptr[cl->level][prio] == cl->node + prio)
htb_next_rb_node(q->ptr[cl->level] + prio);
htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
if (!q->row[cl->level][prio].rb_node)
m |= 1 << prio;
}
q->row_mask[cl->level] &= ~m;
}
/**
* htb_activate_prios - creates an active class's feed chain
*
* The class is connected to ancestors and/or appropriate rows
* for priorities it is participating on. cl->cmode must be new
* (activated) mode. It does nothing if cl->prio_activity == 0.
*/
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
struct htb_class *p = cl->parent;
long m, mask = cl->prio_activity;
while (cl->cmode == HTB_MAY_BORROW && p && mask) {
m = mask;
while (m) {
int prio = ffz(~m);
m &= ~(1 << prio);
if (p->un.inner.feed[prio].rb_node)
/* parent already has its feed in use, so
* reset the bit in mask as parent is already ok
*/
mask &= ~(1 << prio);
htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
}
p->prio_activity |= mask;
cl = p;
p = cl->parent;
}
if (cl->cmode == HTB_CAN_SEND && mask)
htb_add_class_to_row(q, cl, mask);
}
/**
* htb_deactivate_prios - remove class from feed chain
*
* cl->cmode must represent old mode (before deactivation). It does
* nothing if cl->prio_activity == 0. Class is removed from all feed
* chains and rows.
*/
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
struct htb_class *p = cl->parent;
long m, mask = cl->prio_activity;
while (cl->cmode == HTB_MAY_BORROW && p && mask) {
m = mask;
mask = 0;
while (m) {
int prio = ffz(~m);
m &= ~(1 << prio);
if (p->un.inner.ptr[prio] == cl->node + prio) {
/* we are removing a child which is pointed to from the
* parent feed - forget the pointer but remember the
* classid
*/
p->un.inner.last_ptr_id[prio] = cl->common.classid;
p->un.inner.ptr[prio] = NULL;
}
htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);
if (!p->un.inner.feed[prio].rb_node)
mask |= 1 << prio;
}
p->prio_activity &= ~mask;
cl = p;
p = cl->parent;
}
if (cl->cmode == HTB_CAN_SEND && mask)
htb_remove_class_from_row(q, cl, mask);
}
static inline long htb_lowater(const struct htb_class *cl)
{
if (htb_hysteresis)
return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
else
return 0;
}
static inline long htb_hiwater(const struct htb_class *cl)
{
if (htb_hysteresis)
return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
else
return 0;
}
/**
* htb_class_mode - computes and returns current class mode
*
* It computes cl's mode at time cl->t_c+diff and returns it. If mode
* is not HTB_CAN_SEND then cl->pq_key is updated to time difference
* from now to time when cl will change its state.
* It is also worth noting that the class mode doesn't change simply
* at cl->{c,}tokens == 0; rather, there can be hysteresis over the
* 0 .. -cl->{c,}buffer range. It is meant to limit the number of
* mode transitions per time unit. The speed gain is about 1/6.
*/
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
long toks;
if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
*diff = -toks;
return HTB_CANT_SEND;
}
if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
return HTB_CAN_SEND;
*diff = -toks;
return HTB_MAY_BORROW;
}
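/* Worked example (numbers assumed, for illustration only): with
* hysteresis off (lowater == hiwater == 0), cl->ctokens = 2000,
* cl->tokens = -5000 and *diff = 1000:
*	ctokens + diff = 3000 >= 0, so not HTB_CANT_SEND;
*	tokens + diff = -4000 < 0, so HTB_MAY_BORROW and *diff = 4000.
* The updated *diff is then used as the delay when the class is put on
* the wait queue (see htb_add_to_wait_tree()).
*/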
/**
* htb_change_class_mode - changes class's mode
*
* This should be the only way to change a class's mode under normal
* circumstances. The routine will update feed list linkage, change mode
* and add class to the wait event queue if appropriate. New mode should
* be different from old one and cl->pq_key has to be valid if changing
* to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
*/
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
enum htb_cmode new_mode = htb_class_mode(cl, diff);
if (new_mode == cl->cmode)
return;
if (cl->prio_activity) { /* not necessary: speed optimization */
if (cl->cmode != HTB_CANT_SEND)
htb_deactivate_prios(q, cl);
cl->cmode = new_mode;
if (new_mode != HTB_CANT_SEND)
htb_activate_prios(q, cl);
} else
cl->cmode = new_mode;
}
/**
* htb_activate - inserts leaf cl into appropriate active feeds
*
* Routine learns (new) priority of leaf and activates feed chain
* for the prio. It can be called on already active leaf safely.
* It also adds leaf into droplist.
*/
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
if (!cl->prio_activity) {
cl->prio_activity = 1 << cl->prio;
htb_activate_prios(q, cl);
list_add_tail(&cl->un.leaf.drop_list,
q->drops + cl->prio);
}
}
/**
* htb_deactivate - remove leaf cl from active feeds
*
* Make sure that leaf is active. In other words, it can't be called
* with non-active leaf. It also removes class from the drop list.
*/
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
WARN_ON(!cl->prio_activity);
htb_deactivate_prios(q, cl);
cl->prio_activity = 0;
list_del_init(&cl->un.leaf.drop_list);
}
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
int uninitialized_var(ret);
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = htb_classify(skb, sch, &ret);
if (cl == HTB_DIRECT) {
/* enqueue to helper queue */
if (q->direct_queue.qlen < q->direct_qlen) {
__skb_queue_tail(&q->direct_queue, skb);
q->direct_pkts++;
} else {
kfree_skb(skb);
sch->qstats.drops++;
return NET_XMIT_DROP;
}
#ifdef CONFIG_NET_CLS_ACT
} else if (!cl) {
if (ret & __NET_XMIT_BYPASS)
sch->qstats.drops++;
kfree_skb(skb);
return ret;
#endif
} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret)) {
sch->qstats.drops++;
cl->qstats.drops++;
}
return ret;
} else {
bstats_update(&cl->bstats, skb);
htb_activate(q, cl);
}
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, long diff)
{
long toks = diff + cl->tokens;
if (toks > cl->buffer)
toks = cl->buffer;
toks -= (long) qdisc_l2t(cl->rate, bytes);
if (toks <= -cl->mbuffer)
toks = 1 - cl->mbuffer;
cl->tokens = toks;
}
static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, long diff)
{
long toks = diff + cl->ctokens;
if (toks > cl->cbuffer)
toks = cl->cbuffer;
toks -= (long) qdisc_l2t(cl->ceil, bytes);
if (toks <= -cl->mbuffer)
toks = 1 - cl->mbuffer;
cl->ctokens = toks;
}
/**
* htb_charge_class - charges amount "bytes" to leaf and ancestors
*
* Routine assumes that packet "bytes" long was dequeued from leaf cl
* borrowing from "level". It accounts bytes to ceil leaky bucket for
* leaf and all ancestors and to rate bucket for ancestors at levels
* "level" and higher. It also handles possible change of mode resulting
* from the update. Note that mode can also increase here (MAY_BORROW to
* CAN_SEND) because we can use more precise clock that event queue here.
* In such case we remove class from event queue first.
*/
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
int level, struct sk_buff *skb)
{
int bytes = qdisc_pkt_len(skb);
enum htb_cmode old_mode;
long diff;
while (cl) {
diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
if (cl->level >= level) {
if (cl->level == level)
cl->xstats.lends++;
htb_accnt_tokens(cl, bytes, diff);
} else {
cl->xstats.borrows++;
cl->tokens += diff; /* we moved t_c; update tokens */
}
htb_accnt_ctokens(cl, bytes, diff);
cl->t_c = q->now;
old_mode = cl->cmode;
diff = 0;
htb_change_class_mode(q, cl, &diff);
if (old_mode != cl->cmode) {
if (old_mode != HTB_CAN_SEND)
htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
if (cl->cmode != HTB_CAN_SEND)
htb_add_to_wait_tree(q, cl, diff);
}
/* update basic stats except for leaves which are already updated */
if (cl->level)
bstats_update(&cl->bstats, skb);
cl = cl->parent;
}
}
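/* Example (illustrative): when a 1500-byte packet leaves a leaf that
* borrowed from its grandparent (level == 2), the leaf (level 0) and
* its parent (level 1) only count a borrow and get their tokens topped
* up by diff, classes at level 2 and above are charged rate tokens,
* and ceil tokens are charged on every class in the chain.
*/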
/**
* htb_do_events - make mode changes to classes at the level
*
* Scans event queue for pending events and applies them. Returns time of
* next pending event (0 for no event in pq, q->now for too many events).
* Note: Applied are events which have cl->pq_key <= q->now.
*/
static psched_time_t htb_do_events(struct htb_sched *q, int level,
unsigned long start)
{
/* don't run for longer than 2 jiffies; 2 is used instead of
* 1 to simplify things when jiffy is going to be incremented
* too soon
*/
unsigned long stop_at = start + 2;
while (time_before(jiffies, stop_at)) {
struct htb_class *cl;
long diff;
struct rb_node *p = rb_first(&q->wait_pq[level]);
if (!p)
return 0;
cl = rb_entry(p, struct htb_class, pq_node);
if (cl->pq_key > q->now)
return cl->pq_key;
htb_safe_rb_erase(p, q->wait_pq + level);
diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
htb_change_class_mode(q, cl, &diff);
if (cl->cmode != HTB_CAN_SEND)
htb_add_to_wait_tree(q, cl, diff);
}
/* too much load - let's continue after a break for scheduling */
if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
pr_warning("htb: too many events!\n");
q->warned |= HTB_WARN_TOOMANYEVENTS;
}
return q->now;
}
/* Returns class->node+prio from the id-tree where the class's id is >= id.
* NULL if no such one exists.
*/
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
u32 id)
{
struct rb_node *r = NULL;
while (n) {
struct htb_class *cl =
rb_entry(n, struct htb_class, node[prio]);
if (id > cl->common.classid) {
n = n->rb_right;
} else if (id < cl->common.classid) {
r = n;
n = n->rb_left;
} else {
return n;
}
}
return r;
}
/**
* htb_lookup_leaf - returns next leaf class in DRR order
*
* Find the leaf that the current feed pointer points to.
*/
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
struct rb_node **pptr, u32 * pid)
{
int i;
struct {
struct rb_node *root;
struct rb_node **pptr;
u32 *pid;
} stk[TC_HTB_MAXDEPTH], *sp = stk;
BUG_ON(!tree->rb_node);
sp->root = tree->rb_node;
sp->pptr = pptr;
sp->pid = pid;
for (i = 0; i < 65535; i++) {
if (!*sp->pptr && *sp->pid) {
/* ptr was invalidated but id is valid - try to recover
* the original or next ptr
*/
*sp->pptr =
htb_id_find_next_upper(prio, sp->root, *sp->pid);
}
*sp->pid = 0; /* ptr is valid now, so remove this hint as it
* can become out of date quickly
*/
if (!*sp->pptr) { /* we are at right end; rewind & go up */
*sp->pptr = sp->root;
while ((*sp->pptr)->rb_left)
*sp->pptr = (*sp->pptr)->rb_left;
if (sp > stk) {
sp--;
if (!*sp->pptr) {
WARN_ON(1);
return NULL;
}
htb_next_rb_node(sp->pptr);
}
} else {
struct htb_class *cl;
cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
if (!cl->level)
return cl;
(++sp)->root = cl->un.inner.feed[prio].rb_node;
sp->pptr = cl->un.inner.ptr + prio;
sp->pid = cl->un.inner.last_ptr_id + prio;
}
}
WARN_ON(1);
return NULL;
}
/* dequeues packet at given priority and level; call only if
* you are sure that there is active class at prio/level
*/
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
int level)
{
struct sk_buff *skb = NULL;
struct htb_class *cl, *start;
/* look initial class up in the row */
start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
q->ptr[level] + prio,
q->last_ptr_id[level] + prio);
do {
next:
if (unlikely(!cl))
return NULL;
/* class can be empty - it is unlikely but can be true if leaf
* qdisc drops packets in enqueue routine or if someone used
* graft operation on the leaf since last dequeue;
* simply deactivate and skip such class
*/
if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
struct htb_class *next;
htb_deactivate(q, cl);
/* row/level might become empty */
if ((q->row_mask[level] & (1 << prio)) == 0)
return NULL;
next = htb_lookup_leaf(q->row[level] + prio,
prio, q->ptr[level] + prio,
q->last_ptr_id[level] + prio);
if (cl == start) /* fix start if we just deleted it */
start = next;
cl = next;
goto next;
}
skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
if (likely(skb != NULL))
break;
qdisc_warn_nonwc("htb", cl->un.leaf.q);
htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
ptr[0]) + prio);
cl = htb_lookup_leaf(q->row[level] + prio, prio,
q->ptr[level] + prio,
q->last_ptr_id[level] + prio);
} while (cl != start);
if (likely(skb != NULL)) {
cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
if (cl->un.leaf.deficit[level] < 0) {
cl->un.leaf.deficit[level] += cl->quantum;
htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
ptr[0]) + prio);
}
/* this used to be after charge_class but this constellation
* gives us slightly better performance
*/
if (!cl->un.leaf.q->q.qlen)
htb_deactivate(q, cl);
htb_charge_class(q, cl, level, skb);
}
return skb;
}
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb;
struct htb_sched *q = qdisc_priv(sch);
int level;
psched_time_t next_event;
unsigned long start_at;
/* try to dequeue direct packets as high prio (!) to minimize cpu work */
skb = __skb_dequeue(&q->direct_queue);
if (skb != NULL) {
ok:
qdisc_bstats_update(sch, skb);
qdisc_unthrottled(sch);
sch->q.qlen--;
return skb;
}
if (!sch->q.qlen)
goto fin;
q->now = psched_get_time();
start_at = jiffies;
next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
/* common case optimization - skip event handler quickly */
int m;
psched_time_t event;
if (q->now >= q->near_ev_cache[level]) {
event = htb_do_events(q, level, start_at);
if (!event)
event = q->now + PSCHED_TICKS_PER_SEC;
q->near_ev_cache[level] = event;
} else
event = q->near_ev_cache[level];
if (next_event > event)
next_event = event;
m = ~q->row_mask[level];
while (m != (int)(-1)) {
int prio = ffz(m);
m |= 1 << prio;
skb = htb_dequeue_tree(q, prio, level);
if (likely(skb != NULL))
goto ok;
}
}
sch->qstats.overlimits++;
if (likely(next_event > q->now))
qdisc_watchdog_schedule(&q->watchdog, next_event);
else
schedule_work(&q->work);
fin:
return skb;
}
/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc *sch)
{
struct htb_sched *q = qdisc_priv(sch);
int prio;
for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
struct list_head *p;
list_for_each(p, q->drops + prio) {
struct htb_class *cl = list_entry(p, struct htb_class,
un.leaf.drop_list);
unsigned int len;
if (cl->un.leaf.q->ops->drop &&
(len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
sch->q.qlen--;
if (!cl->un.leaf.q->q.qlen)
htb_deactivate(q, cl);
return len;
}
}
}
return 0;
}
/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl;
struct hlist_node *n;
unsigned int i;
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
if (cl->level)
memset(&cl->un.inner, 0, sizeof(cl->un.inner));
else {
if (cl->un.leaf.q)
qdisc_reset(cl->un.leaf.q);
INIT_LIST_HEAD(&cl->un.leaf.drop_list);
}
cl->prio_activity = 0;
cl->cmode = HTB_CAN_SEND;
}
}
qdisc_watchdog_cancel(&q->watchdog);
__skb_queue_purge(&q->direct_queue);
sch->q.qlen = 0;
memset(q->row, 0, sizeof(q->row));
memset(q->row_mask, 0, sizeof(q->row_mask));
memset(q->wait_pq, 0, sizeof(q->wait_pq));
memset(q->ptr, 0, sizeof(q->ptr));
for (i = 0; i < TC_HTB_NUMPRIO; i++)
INIT_LIST_HEAD(q->drops + i);
}
static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
[TCA_HTB_PARMS] = { .len = sizeof(struct tc_htb_opt) },
[TCA_HTB_INIT] = { .len = sizeof(struct tc_htb_glob) },
[TCA_HTB_CTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
[TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
};
static void htb_work_func(struct work_struct *work)
{
struct htb_sched *q = container_of(work, struct htb_sched, work);
struct Qdisc *sch = q->watchdog.qdisc;
__netif_schedule(qdisc_root(sch));
}
static int htb_init(struct Qdisc *sch, struct nlattr *opt)
{
struct htb_sched *q = qdisc_priv(sch);
struct nlattr *tb[TCA_HTB_INIT + 1];
struct tc_htb_glob *gopt;
int err;
int i;
if (!opt)
return -EINVAL;
err = nla_parse_nested(tb, TCA_HTB_INIT, opt, htb_policy);
if (err < 0)
return err;
if (tb[TCA_HTB_INIT] == NULL) {
pr_err("HTB: hey probably you have bad tc tool ?\n");
return -EINVAL;
}
gopt = nla_data(tb[TCA_HTB_INIT]);
if (gopt->version != HTB_VER >> 16) {
pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n",
HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
return -EINVAL;
}
err = qdisc_class_hash_init(&q->clhash);
if (err < 0)
return err;
for (i = 0; i < TC_HTB_NUMPRIO; i++)
INIT_LIST_HEAD(q->drops + i);
qdisc_watchdog_init(&q->watchdog, sch);
INIT_WORK(&q->work, htb_work_func);
skb_queue_head_init(&q->direct_queue);
q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
q->direct_qlen = 2;
if ((q->rate2quantum = gopt->rate2quantum) < 1)
q->rate2quantum = 1;
q->defcls = gopt->defcls;
return 0;
}
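/* Example (tc invocation assumed, for illustration only):
* "tc qdisc add dev eth0 root handle 1: htb default 30 r2q 10"
* arrives here with gopt->defcls == 0x30 (the minor is parsed as hex
* by tc) and gopt->rate2quantum == 10.
*/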
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
struct htb_sched *q = qdisc_priv(sch);
struct nlattr *nest;
struct tc_htb_glob gopt;
spin_lock_bh(root_lock);
gopt.direct_pkts = q->direct_pkts;
gopt.version = HTB_VER;
gopt.rate2quantum = q->rate2quantum;
gopt.defcls = q->defcls;
gopt.debug = 0;
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
nla_nest_end(skb, nest);
spin_unlock_bh(root_lock);
return skb->len;
nla_put_failure:
spin_unlock_bh(root_lock);
nla_nest_cancel(skb, nest);
return -1;
}
static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
struct sk_buff *skb, struct tcmsg *tcm)
{
struct htb_class *cl = (struct htb_class *)arg;
spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
struct nlattr *nest;
struct tc_htb_opt opt;
spin_lock_bh(root_lock);
tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
tcm->tcm_handle = cl->common.classid;
if (!cl->level && cl->un.leaf.q)
tcm->tcm_info = cl->un.leaf.q->handle;
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
memset(&opt, 0, sizeof(opt));
opt.rate = cl->rate->rate;
opt.buffer = cl->buffer;
opt.ceil = cl->ceil->rate;
opt.cbuffer = cl->cbuffer;
opt.quantum = cl->quantum;
opt.prio = cl->prio;
opt.level = cl->level;
NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
nla_nest_end(skb, nest);
spin_unlock_bh(root_lock);
return skb->len;
nla_put_failure:
spin_unlock_bh(root_lock);
nla_nest_cancel(skb, nest);
return -1;
}
static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
struct htb_class *cl = (struct htb_class *)arg;
if (!cl->level && cl->un.leaf.q)
cl->qstats.qlen = cl->un.leaf.q->q.qlen;
cl->xstats.tokens = cl->tokens;
cl->xstats.ctokens = cl->ctokens;
if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, &cl->qstats) < 0)
return -1;
return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct Qdisc **old)
{
struct htb_class *cl = (struct htb_class *)arg;
if (cl->level)
return -EINVAL;
if (new == NULL &&
(new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
cl->common.classid)) == NULL)
return -ENOBUFS;
sch_tree_lock(sch);
*old = cl->un.leaf.q;
cl->un.leaf.q = new;
if (*old != NULL) {
qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
qdisc_reset(*old);
}
sch_tree_unlock(sch);
return 0;
}
static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
struct htb_class *cl = (struct htb_class *)arg;
return !cl->level ? cl->un.leaf.q : NULL;
}
static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
struct htb_class *cl = (struct htb_class *)arg;
if (cl->un.leaf.q->q.qlen == 0)
htb_deactivate(qdisc_priv(sch), cl);
}
static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
struct htb_class *cl = htb_find(classid, sch);
if (cl)
cl->refcnt++;
return (unsigned long)cl;
}
static inline int htb_parent_last_child(struct htb_class *cl)
{
if (!cl->parent)
/* the root class */
return 0;
if (cl->parent->children > 1)
/* not the last child */
return 0;
return 1;
}
static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
struct Qdisc *new_q)
{
struct htb_class *parent = cl->parent;
WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
if (parent->cmode != HTB_CAN_SEND)
htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);
parent->level = 0;
memset(&parent->un.inner, 0, sizeof(parent->un.inner));
INIT_LIST_HEAD(&parent->un.leaf.drop_list);
parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
parent->tokens = parent->buffer;
parent->ctokens = parent->cbuffer;
parent->t_c = psched_get_time();
parent->cmode = HTB_CAN_SEND;
}
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
if (!cl->level) {
WARN_ON(!cl->un.leaf.q);
qdisc_destroy(cl->un.leaf.q);
}
gen_kill_estimator(&cl->bstats, &cl->rate_est);
qdisc_put_rtab(cl->rate);
qdisc_put_rtab(cl->ceil);
tcf_destroy_chain(&cl->filter_list);
kfree(cl);
}
static void htb_destroy(struct Qdisc *sch)
{
struct htb_sched *q = qdisc_priv(sch);
struct hlist_node *n, *next;
struct htb_class *cl;
unsigned int i;
cancel_work_sync(&q->work);
qdisc_watchdog_cancel(&q->watchdog);
/* This line used to be after htb_destroy_class call below
* and surprisingly it worked in 2.4. But it must precede it
* because filter need its target class alive to be able to call
* unbind_filter on it (without Oops).
*/
tcf_destroy_chain(&q->filter_list);
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
tcf_destroy_chain(&cl->filter_list);
}
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
common.hnode)
htb_destroy_class(sch, cl);
}
qdisc_class_hash_destroy(&q->clhash);
__skb_queue_purge(&q->direct_queue);
}
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
unsigned int qlen;
struct Qdisc *new_q = NULL;
int last_child = 0;
/* TODO: why not allow deleting a subtree? references? does the
* tc subsys guarantee us that in htb_destroy it holds no class
* refs so that we can remove children safely there?
*/
if (cl->children || cl->filter_cnt)
return -EBUSY;
if (!cl->level && htb_parent_last_child(cl)) {
new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
cl->parent->common.classid);
last_child = 1;
}
sch_tree_lock(sch);
if (!cl->level) {
qlen = cl->un.leaf.q->q.qlen;
qdisc_reset(cl->un.leaf.q);
qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
}
/* delete from hash and active; remainder in destroy_class */
qdisc_class_hash_remove(&q->clhash, &cl->common);
if (cl->parent)
cl->parent->children--;
if (cl->prio_activity)
htb_deactivate(q, cl);
if (cl->cmode != HTB_CAN_SEND)
htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
if (last_child)
htb_parent_to_leaf(q, cl, new_q);
BUG_ON(--cl->refcnt == 0);
/*
* This shouldn't happen: we "hold" one cops->get() when called
* from tc_ctl_tclass; the destroy method is done from cops->put().
*/
sch_tree_unlock(sch);
return 0;
}
static void htb_put(struct Qdisc *sch, unsigned long arg)
{
struct htb_class *cl = (struct htb_class *)arg;
if (--cl->refcnt == 0)
htb_destroy_class(sch, cl);
}
static int htb_change_class(struct Qdisc *sch, u32 classid,
u32 parentid, struct nlattr **tca,
unsigned long *arg)
{
int err = -EINVAL;
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)*arg, *parent;
struct nlattr *opt = tca[TCA_OPTIONS];
struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
struct nlattr *tb[__TCA_HTB_MAX];
struct tc_htb_opt *hopt;
/* extract all subattrs from opt attr */
if (!opt)
goto failure;
err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
if (err < 0)
goto failure;
err = -EINVAL;
if (tb[TCA_HTB_PARMS] == NULL)
goto failure;
parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
hopt = nla_data(tb[TCA_HTB_PARMS]);
rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
if (!rtab || !ctab)
goto failure;
if (!cl) { /* new class */
struct Qdisc *new_q;
int prio;
struct {
struct nlattr nla;
struct gnet_estimator opt;
} est = {
.nla = {
.nla_len = nla_attr_size(sizeof(est.opt)),
.nla_type = TCA_RATE,
},
.opt = {
/* 4s interval, 16s averaging constant */
.interval = 2,
.ewma_log = 2,
},
};
/* check for valid classid */
if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
htb_find(classid, sch))
goto failure;
/* check maximal depth */
if (parent && parent->parent && parent->parent->level < 2) {
pr_err("htb: tree is too deep\n");
goto failure;
}
err = -ENOBUFS;
cl = kzalloc(sizeof(*cl), GFP_KERNEL);
if (!cl)
goto failure;
err = gen_new_estimator(&cl->bstats, &cl->rate_est,
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE] ? : &est.nla);
if (err) {
kfree(cl);
goto failure;
}
cl->refcnt = 1;
cl->children = 0;
INIT_LIST_HEAD(&cl->un.leaf.drop_list);
RB_CLEAR_NODE(&cl->pq_node);
for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
RB_CLEAR_NODE(&cl->node[prio]);
/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
* so it can't be used inside of sch_tree_lock
* -- thanks to Karlis Peisenieks
*/
new_q = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops, classid);
sch_tree_lock(sch);
if (parent && !parent->level) {
unsigned int qlen = parent->un.leaf.q->q.qlen;
/* turn parent into inner node */
qdisc_reset(parent->un.leaf.q);
qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
qdisc_destroy(parent->un.leaf.q);
if (parent->prio_activity)
htb_deactivate(q, parent);
/* remove from evt list because of level change */
if (parent->cmode != HTB_CAN_SEND) {
htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
parent->cmode = HTB_CAN_SEND;
}
parent->level = (parent->parent ? parent->parent->level
: TC_HTB_MAXDEPTH) - 1;
memset(&parent->un.inner, 0, sizeof(parent->un.inner));
}
/* leaf (we) needs elementary qdisc */
cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
cl->common.classid = classid;
cl->parent = parent;
/* set class to be in HTB_CAN_SEND state */
cl->tokens = hopt->buffer;
cl->ctokens = hopt->cbuffer;
cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC; /* 1min */
cl->t_c = psched_get_time();
cl->cmode = HTB_CAN_SEND;
/* attach to the hash list and parent's family */
qdisc_class_hash_insert(&q->clhash, &cl->common);
if (parent)
parent->children++;
} else {
if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
if (err)
return err;
}
sch_tree_lock(sch);
}
/* there used to be a nasty bug here: we have to check that the node
* is really a leaf before changing cl->un.leaf!
*/
if (!cl->level) {
cl->quantum = rtab->rate.rate / q->rate2quantum;
if (!hopt->quantum && cl->quantum < 1000) {
pr_warning(
"HTB: quantum of class %X is small. Consider r2q change.\n",
cl->common.classid);
cl->quantum = 1000;
}
if (!hopt->quantum && cl->quantum > 200000) {
pr_warning(
"HTB: quantum of class %X is big. Consider r2q change.\n",
cl->common.classid);
cl->quantum = 200000;
}
if (hopt->quantum)
cl->quantum = hopt->quantum;
if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
cl->prio = TC_HTB_NUMPRIO - 1;
}
cl->buffer = hopt->buffer;
cl->cbuffer = hopt->cbuffer;
if (cl->rate)
qdisc_put_rtab(cl->rate);
cl->rate = rtab;
if (cl->ceil)
qdisc_put_rtab(cl->ceil);
cl->ceil = ctab;
sch_tree_unlock(sch);
qdisc_class_hash_grow(sch, &q->clhash);
*arg = (unsigned long)cl;
return 0;
failure:
if (rtab)
qdisc_put_rtab(rtab);
if (ctab)
qdisc_put_rtab(ctab);
return err;
}
static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
return fl;
}
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
struct htb_class *cl = htb_find(classid, sch);
/*if (cl && !cl->level) return 0;
* The line above used to be there to prevent attaching filters to
* leaves. But at least tc_index filter uses this just to get class
* for other reasons so that we have to allow for it.
* ----
* 19.6.2002 As Werner explained it is ok - bind filter is just
* another way to "lock" the class - unlike "get" this lock can
* be broken by class during destroy IIUC.
*/
if (cl)
cl->filter_cnt++;
return (unsigned long)cl;
}
static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
struct htb_class *cl = (struct htb_class *)arg;
if (cl)
cl->filter_cnt--;
}
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl;
struct hlist_node *n;
unsigned int i;
if (arg->stop)
return;
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
if (arg->count < arg->skip) {
arg->count++;
continue;
}
if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
arg->stop = 1;
return;
}
arg->count++;
}
}
}
static const struct Qdisc_class_ops htb_class_ops = {
.graft = htb_graft,
.leaf = htb_leaf,
.qlen_notify = htb_qlen_notify,
.get = htb_get,
.put = htb_put,
.change = htb_change_class,
.delete = htb_delete,
.walk = htb_walk,
.tcf_chain = htb_find_tcf,
.bind_tcf = htb_bind_filter,
.unbind_tcf = htb_unbind_filter,
.dump = htb_dump_class,
.dump_stats = htb_dump_class_stats,
};
static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
.cl_ops = &htb_class_ops,
.id = "htb",
.priv_size = sizeof(struct htb_sched),
.enqueue = htb_enqueue,
.dequeue = htb_dequeue,
.peek = qdisc_peek_dequeued,
.drop = htb_drop,
.init = htb_init,
.reset = htb_reset,
.destroy = htb_destroy,
.dump = htb_dump,
.owner = THIS_MODULE,
};
static int __init htb_module_init(void)
{
return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
unregister_qdisc(&htb_qdisc_ops);
}
module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");
| gpl-2.0 |
PyYoshi/android_kernel_sharp_303sh | drivers/ata/sata_uli.c | 5508 | 6917 | /*
* sata_uli.c - ULi Electronics SATA
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/DocBook/libata.*
*
* Hardware documentation available under NDA.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "sata_uli"
#define DRV_VERSION "1.3"
enum {
uli_5289 = 0,
uli_5287 = 1,
uli_5281 = 2,
uli_max_ports = 4,
/* PCI configuration registers */
ULI5287_BASE = 0x90, /* sata0 phy SCR registers */
ULI5287_OFFS = 0x10, /* offset from sata0->sata1 phy regs */
ULI5281_BASE = 0x60, /* sata0 phy SCR registers */
ULI5281_OFFS = 0x60, /* offset from sata0->sata1 phy regs */
};
struct uli_priv {
unsigned int scr_cfg_addr[uli_max_ports];
};
static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int uli_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static const struct pci_device_id uli_pci_tbl[] = {
{ PCI_VDEVICE(AL, 0x5289), uli_5289 },
{ PCI_VDEVICE(AL, 0x5287), uli_5287 },
{ PCI_VDEVICE(AL, 0x5281), uli_5281 },
{ } /* terminate list */
};
static struct pci_driver uli_pci_driver = {
.name = DRV_NAME,
.id_table = uli_pci_tbl,
.probe = uli_init_one,
.remove = ata_pci_remove_one,
};
static struct scsi_host_template uli_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations uli_ops = {
.inherits = &ata_bmdma_port_ops,
.scr_read = uli_scr_read,
.scr_write = uli_scr_write,
.hardreset = ATA_OP_NULL,
};
static const struct ata_port_info uli_port_info = {
.flags = ATA_FLAG_SATA | ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &uli_ops,
};
MODULE_AUTHOR("Peer Chen");
MODULE_DESCRIPTION("low-level driver for ULi Electronics SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, uli_pci_tbl);
MODULE_VERSION(DRV_VERSION);
static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
{
struct uli_priv *hpriv = ap->host->private_data;
return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg);
}
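/* Example (values from the enum above, for illustration only): on a
* ULi 5287, port 1 with sc_reg == SCR_CONTROL (2) resolves to
* 0x90 + 0x10 + 4 * 2 = 0xa8 in PCI configuration space.
*/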
static u32 uli_scr_cfg_read(struct ata_link *link, unsigned int sc_reg)
{
struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg);
u32 val;
pci_read_config_dword(pdev, cfg_addr, &val);
return val;
}
static void uli_scr_cfg_write(struct ata_link *link, unsigned int scr, u32 val)
{
struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
unsigned int cfg_addr = get_scr_cfg_addr(link->ap, scr);
pci_write_config_dword(pdev, cfg_addr, val);
}
static int uli_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
*val = uli_scr_cfg_read(link, sc_reg);
return 0;
}
static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL) /* SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0 */
return -EINVAL;
uli_scr_cfg_write(link, sc_reg, val);
return 0;
}
static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct ata_port_info *ppi[] = { &uli_port_info, NULL };
unsigned int board_idx = (unsigned int) ent->driver_data;
struct ata_host *host;
struct uli_priv *hpriv;
void __iomem * const *iomap;
struct ata_ioports *ioaddr;
int n_ports, rc;
ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
return rc;
n_ports = 2;
if (board_idx == uli_5287)
n_ports = 4;
/* allocate the host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
if (!host)
return -ENOMEM;
hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
host->private_data = hpriv;
/* the first two ports are standard SFF */
rc = ata_pci_sff_init_host(host);
if (rc)
return rc;
ata_pci_bmdma_init(host);
iomap = host->iomap;
switch (board_idx) {
case uli_5287:
/* If there are four, the last two live right after
* the standard SFF ports.
*/
hpriv->scr_cfg_addr[0] = ULI5287_BASE;
hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
ioaddr = &host->ports[2]->ioaddr;
ioaddr->cmd_addr = iomap[0] + 8;
ioaddr->altstatus_addr =
ioaddr->ctl_addr = (void __iomem *)
((unsigned long)iomap[1] | ATA_PCI_CTL_OFS) + 4;
ioaddr->bmdma_addr = iomap[4] + 16;
hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4;
ata_sff_std_ports(ioaddr);
ata_port_desc(host->ports[2],
"cmd 0x%llx ctl 0x%llx bmdma 0x%llx",
(unsigned long long)pci_resource_start(pdev, 0) + 8,
((unsigned long long)pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4,
(unsigned long long)pci_resource_start(pdev, 4) + 16);
ioaddr = &host->ports[3]->ioaddr;
ioaddr->cmd_addr = iomap[2] + 8;
ioaddr->altstatus_addr =
ioaddr->ctl_addr = (void __iomem *)
((unsigned long)iomap[3] | ATA_PCI_CTL_OFS) + 4;
ioaddr->bmdma_addr = iomap[4] + 24;
hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5;
ata_sff_std_ports(ioaddr);
ata_port_desc(host->ports[3],
"cmd 0x%llx ctl 0x%llx bmdma 0x%llx",
(unsigned long long)pci_resource_start(pdev, 2) + 8,
((unsigned long long)pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4,
(unsigned long long)pci_resource_start(pdev, 4) + 24);
break;
case uli_5289:
hpriv->scr_cfg_addr[0] = ULI5287_BASE;
hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
break;
case uli_5281:
hpriv->scr_cfg_addr[0] = ULI5281_BASE;
hpriv->scr_cfg_addr[1] = ULI5281_BASE + ULI5281_OFFS;
break;
default:
BUG();
break;
}
pci_set_master(pdev);
pci_intx(pdev, 1);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &uli_sht);
}
static int __init uli_init(void)
{
return pci_register_driver(&uli_pci_driver);
}
static void __exit uli_exit(void)
{
pci_unregister_driver(&uli_pci_driver);
}
module_init(uli_init);
module_exit(uli_exit);
| gpl-2.0 |
MaxiCM/Samsung_STE_Kernel | drivers/staging/comedi/drivers/addi-data/APCI1710_Pwm.c | 8324 | 110860 | /**
@verbatim
Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
ADDI-DATA GmbH
Dieselstrasse 3
D-77833 Ottersweier
Tel: +49(0)7223/9493-0
Fax: +49(0)7223/9493-92
http://www.addi-data.com
info@addi-data.com
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
You should also find the complete GPL in the COPYING file accompanying this source code.
@endverbatim
*/
/*
+-----------------------------------------------------------------------+
| (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
+-----------------------------------------------------------------------+
| Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
| Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
+-----------------------------------------------------------------------+
| Project : API APCI1710 | Compiler : gcc |
| Module name : PWM.C | Version : 2.96 |
+-------------------------------+---------------------------------------+
| Project manager: Eric Stolz | Date : 02/12/2002 |
+-----------------------------------------------------------------------+
| Description : APCI-1710 Pulse width modulation module |
| |
| |
+-----------------------------------------------------------------------+
| UPDATES |
+-----------------------------------------------------------------------+
| Date | Author | Description of updates |
+-----------------------------------------------------------------------+
| 08/05/00 | Guinot C | - 0400/0228 All Function in RING 0 |
| | | available |
+-----------------------------------------------------------------------+
*/
/*
+----------------------------------------------------------------------------+
| Included files |
+----------------------------------------------------------------------------+
*/
#include "APCI1710_Pwm.h"
/*
+----------------------------------------------------------------------------+
| Function Name :INT i_APCI1710_InsnConfigPWM(struct comedi_device *dev,
struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : Pwm Init and Get Pwm Initialisation |
+----------------------------------------------------------------------------+
| Input Parameters :
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value :
+----------------------------------------------------------------------------+
*/
int i_APCI1710_InsnConfigPWM(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
unsigned char b_ConfigType;
int i_ReturnValue = 0;
b_ConfigType = CR_CHAN(insn->chanspec);
switch (b_ConfigType) {
case APCI1710_PWM_INIT:
i_ReturnValue = i_APCI1710_InitPWM(dev, (unsigned char) CR_AREF(insn->chanspec), /* b_ModulNbr */
(unsigned char) data[0], /* b_PWM */
(unsigned char) data[1], /* b_ClockSelection */
(unsigned char) data[2], /* b_TimingUnit */
(unsigned int) data[3], /* ul_LowTiming */
(unsigned int) data[4], /* ul_HighTiming */
(unsigned int *) &data[0], /* pul_RealLowTiming */
(unsigned int *) &data[1] /* pul_RealHighTiming */
);
break;
case APCI1710_PWM_GETINITDATA:
i_ReturnValue = i_APCI1710_GetPWMInitialisation(dev, (unsigned char) CR_AREF(insn->chanspec), /* b_ModulNbr */
(unsigned char) data[0], /* b_PWM */
(unsigned char *) &data[0], /* pb_TimingUnit */
(unsigned int *) &data[1], /* pul_LowTiming */
(unsigned int *) &data[2], /* pul_HighTiming */
(unsigned char *) &data[3], /* pb_StartLevel */
(unsigned char *) &data[4], /* pb_StopMode */
(unsigned char *) &data[5], /* pb_StopLevel */
(unsigned char *) &data[6], /* pb_ExternGate */
(unsigned char *) &data[7], /* pb_InterruptEnable */
(unsigned char *) &data[8] /* pb_Enable */
);
break;
default:
printk(" Config Parameter Wrong\n");
}
if (i_ReturnValue >= 0)
i_ReturnValue = insn->n;
return i_ReturnValue;
}
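/*
 * Usage sketch (hypothetical, for illustration only; not part of the
 * driver): the comedi instruction a caller would build to reach the
 * config handler above with APCI1710_PWM_INIT. The chanspec carries the
 * config type in CR_CHAN and the module number in CR_AREF, matching the
 * decoding in i_APCI1710_InsnConfigPWM.
 */
#if 0
	struct comedi_insn insn;
	unsigned int data[5] = {
		0,			/* data[0]: b_PWM, PWM 0 */
		APCI1710_30MHZ,		/* data[1]: b_ClockSelection */
		1,			/* data[2]: b_TimingUnit, microseconds */
		100,			/* data[3]: ul_LowTiming */
		100,			/* data[4]: ul_HighTiming */
	};

	insn.insn = INSN_CONFIG;
	insn.n = 5;
	insn.data = data;
	/* chan = config type, aref = module number */
	insn.chanspec = CR_PACK(APCI1710_PWM_INIT, 0, 0);
	/* on success, data[0]/data[1] return the real low/high timings */
#endif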
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_InitPWM |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_PWM, |
| unsigned char_ b_ClockSelection, |
| unsigned char_ b_TimingUnit, |
| ULONG_ ul_LowTiming, |
| ULONG_ ul_HighTiming, |
| PULONG_ pul_RealLowTiming, |
| PULONG_ pul_RealHighTiming) |
+----------------------------------------------------------------------------+
| Task : Configure the selected PWM (b_PWM) of the selected |
| module (b_ModulNbr). The ul_LowTiming, ul_HighTiming and |
| ul_TimingUnit determine the low/high timing base for |
| the period. pul_RealLowTiming, pul_RealHighTiming |
| return the real timing values. |
| You must call this function before you call any |
| other function which accesses the PWM. |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure|
| (0 to 3) |
| unsigned char_ b_PWM : Selected PWM (0 or 1). |
| unsigned char_ b_ClockSelection : Selection of the PCI bus |
| clock |
| - APCI1710_30MHZ : |
| The PC has a 30 MHz |
| PCI bus clock |
| - APCI1710_33MHZ : |
| The PC has a 33 MHz |
| PCI bus clock |
| - APCI1710_40MHZ |
| The APCI-1710 has an |
| integrated 40 MHz |
| quartz. |
| unsigned char_ b_TimingUnit : Base timing Unit (0 to 4) |
| 0 : ns |
| 1 : µs |
| 2 : ms |
| 3 : s |
| 4 : mn |
| ULONG_ ul_LowTiming : Low base timing value. |
| ULONG_ ul_HighTiming : High base timing value. |
+----------------------------------------------------------------------------+
| Output Parameters : PULONG_ pul_RealLowTiming : Real low base timing |
| value. |
| PULONG_ pul_RealHighTiming : Real high base timing |
| value. |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: Module selection wrong |
| -3: The module is not a PWM module |
| -4: PWM selection is wrong |
| -5: The selected input clock is wrong |
| -6: Timing Unit selection is wrong |
| -7: Low base timing selection is wrong |
| -8: High base timing selection is wrong |
| -9: You cannot use the 40MHz clock selection with |
| this board |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_InitPWM(struct comedi_device *dev,
unsigned char b_ModulNbr,
unsigned char b_PWM,
unsigned char b_ClockSelection,
unsigned char b_TimingUnit,
unsigned int ul_LowTiming,
unsigned int ul_HighTiming,
unsigned int *pul_RealLowTiming, unsigned int *pul_RealHighTiming)
{
int i_ReturnValue = 0;
unsigned int ul_LowTimerValue = 0;
unsigned int ul_HighTimerValue = 0;
unsigned int dw_Command;
double d_RealLowTiming = 0;
double d_RealHighTiming = 0;
/**************************/
/* Test the module number */
/**************************/
if (b_ModulNbr < 4) {
/***************/
/* Test if PWM */
/***************/
if ((devpriv->s_BoardInfos.
dw_MolduleConfiguration[b_ModulNbr] &
0xFFFF0000UL) == APCI1710_PWM) {
/**************************/
/* Test the PWM selection */
/**************************/
if (b_PWM <= 1) {
/******************/
/* Test the clock */
/******************/
if ((b_ClockSelection == APCI1710_30MHZ) ||
(b_ClockSelection == APCI1710_33MHZ) ||
(b_ClockSelection == APCI1710_40MHZ)) {
/************************/
/* Test the timing unit */
/************************/
if (b_TimingUnit <= 4) {
/*********************************/
/* Test the low timing selection */
/*********************************/
if (((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 0)
	&& (ul_LowTiming >= 266) && (ul_LowTiming <= 0xFFFFFFFFUL))
	|| ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 1)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 571230650UL))
	|| ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 2)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 571230UL))
	|| ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 3)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 571UL))
	|| ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 4)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 9UL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 0)
	&& (ul_LowTiming >= 242) && (ul_LowTiming <= 0xFFFFFFFFUL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 1)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 519691043UL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 2)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 519691UL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 3)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 520UL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 4)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 8UL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 0)
	&& (ul_LowTiming >= 200) && (ul_LowTiming <= 0xFFFFFFFFUL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 1)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 429496729UL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 2)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 429496UL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 3)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 429UL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 4)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 7UL))) {
/**********************************/
/* Test the High timing selection */
/**********************************/
if (((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 0)
	&& (ul_HighTiming >= 266) && (ul_HighTiming <= 0xFFFFFFFFUL))
	|| ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 1)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 571230650UL))
	|| ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 2)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 571230UL))
	|| ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 3)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 571UL))
	|| ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 4)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 9UL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 0)
	&& (ul_HighTiming >= 242) && (ul_HighTiming <= 0xFFFFFFFFUL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 1)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 519691043UL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 2)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 519691UL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 3)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 520UL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 4)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 8UL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 0)
	&& (ul_HighTiming >= 200) && (ul_HighTiming <= 0xFFFFFFFFUL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 1)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 429496729UL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 2)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 429496UL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 3)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 429UL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 4)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 7UL))) {
/**************************/
/* Test the board version */
/**************************/
if (((b_ClockSelection == APCI1710_40MHZ)
	&& (devpriv->s_BoardInfos.b_BoardVersion > 0))
	|| (b_ClockSelection != APCI1710_40MHZ)) {
/*************************************/
/* Calculate the low division factor */
/*************************************/
fpu_begin();
switch (b_TimingUnit) {
	/* ns */
case 0:
	/* Timer 0 factor */
	ul_LowTimerValue = (unsigned int)
		(ul_LowTiming * (0.00025 * b_ClockSelection));
	/* Round the value */
	if ((double)((double)ul_LowTiming * (0.00025 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
		ul_LowTimerValue = ul_LowTimerValue + 1;
	}
	/* Calculate the real timing */
	*pul_RealLowTiming = (unsigned int)
		(ul_LowTimerValue / (0.00025 * (double)b_ClockSelection));
	d_RealLowTiming = (double)ul_LowTimerValue /
		(0.00025 * (double)b_ClockSelection);
	if ((double)((double)ul_LowTimerValue / (0.00025 * (double)b_ClockSelection)) >= (double)((double)*pul_RealLowTiming + 0.5)) {
		*pul_RealLowTiming = *pul_RealLowTiming + 1;
	}
	ul_LowTiming = ul_LowTiming - 1;
	ul_LowTimerValue = ul_LowTimerValue - 2;
	if (b_ClockSelection != APCI1710_40MHZ) {
		ul_LowTimerValue = (unsigned int)
			((double)(ul_LowTimerValue) * 1.007752288);
	}
	break;
	/* µs */
case 1:
	/* Timer 0 factor */
	ul_LowTimerValue = (unsigned int)
		(ul_LowTiming * (0.25 * b_ClockSelection));
	/* Round the value */
	if ((double)((double)ul_LowTiming * (0.25 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
		ul_LowTimerValue = ul_LowTimerValue + 1;
	}
	/* Calculate the real timing */
	*pul_RealLowTiming = (unsigned int)
		(ul_LowTimerValue / (0.25 * (double)b_ClockSelection));
	d_RealLowTiming = (double)ul_LowTimerValue /
		((double)0.25 * (double)b_ClockSelection);
	if ((double)((double)ul_LowTimerValue / (0.25 * (double)b_ClockSelection)) >= (double)((double)*pul_RealLowTiming + 0.5)) {
		*pul_RealLowTiming = *pul_RealLowTiming + 1;
	}
	ul_LowTiming = ul_LowTiming - 1;
	ul_LowTimerValue = ul_LowTimerValue - 2;
	if (b_ClockSelection != APCI1710_40MHZ) {
		ul_LowTimerValue = (unsigned int)
			((double)(ul_LowTimerValue) * 1.007752288);
	}
	break;
	/* ms */
case 2:
	/* Timer 0 factor */
	ul_LowTimerValue = ul_LowTiming * (250.0 * b_ClockSelection);
	/* Round the value */
	if ((double)((double)ul_LowTiming * (250.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
		ul_LowTimerValue = ul_LowTimerValue + 1;
	}
	/* Calculate the real timing */
	*pul_RealLowTiming = (unsigned int)
		(ul_LowTimerValue / (250.0 * (double)b_ClockSelection));
	d_RealLowTiming = (double)ul_LowTimerValue /
		(250.0 * (double)b_ClockSelection);
	if ((double)((double)ul_LowTimerValue / (250.0 * (double)b_ClockSelection)) >= (double)((double)*pul_RealLowTiming + 0.5)) {
		*pul_RealLowTiming = *pul_RealLowTiming + 1;
	}
	ul_LowTiming = ul_LowTiming - 1;
	ul_LowTimerValue = ul_LowTimerValue - 2;
	if (b_ClockSelection != APCI1710_40MHZ) {
		ul_LowTimerValue = (unsigned int)
			((double)(ul_LowTimerValue) * 1.007752288);
	}
	break;
	/* s */
case 3:
	/* Timer 0 factor */
	ul_LowTimerValue = (unsigned int)
		(ul_LowTiming * (250000.0 * b_ClockSelection));
	/* Round the value */
	if ((double)((double)ul_LowTiming * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
		ul_LowTimerValue = ul_LowTimerValue + 1;
	}
	/* Calculate the real timing */
	*pul_RealLowTiming = (unsigned int)
		(ul_LowTimerValue / (250000.0 * (double)b_ClockSelection));
	d_RealLowTiming = (double)ul_LowTimerValue /
		(250000.0 * (double)b_ClockSelection);
	if ((double)((double)ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) >= (double)((double)*pul_RealLowTiming + 0.5)) {
		*pul_RealLowTiming = *pul_RealLowTiming + 1;
	}
	ul_LowTiming = ul_LowTiming - 1;
	ul_LowTimerValue = ul_LowTimerValue - 2;
	if (b_ClockSelection != APCI1710_40MHZ) {
		ul_LowTimerValue = (unsigned int)
			((double)(ul_LowTimerValue) * 1.007752288);
	}
	break;
	/* mn */
case 4:
	/* Timer 0 factor */
	ul_LowTimerValue = (unsigned int)
		((ul_LowTiming * 60) * (250000.0 * b_ClockSelection));
	/* Round the value */
	if ((double)((double)(ul_LowTiming * 60.0) * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
		ul_LowTimerValue = ul_LowTimerValue + 1;
	}
	/* Calculate the real timing */
	*pul_RealLowTiming = (unsigned int)
		(ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) / 60;
	d_RealLowTiming = ((double)ul_LowTimerValue /
		(250000.0 * (double)b_ClockSelection)) / 60.0;
	if ((double)(((double)ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0) >= (double)((double)*pul_RealLowTiming + 0.5)) {
		*pul_RealLowTiming = *pul_RealLowTiming + 1;
	}
	ul_LowTiming = ul_LowTiming - 1;
	ul_LowTimerValue = ul_LowTimerValue - 2;
	if (b_ClockSelection != APCI1710_40MHZ) {
		ul_LowTimerValue = (unsigned int)
			((double)(ul_LowTimerValue) * 1.007752288);
	}
	break;
}
/**************************************/
/* Calculate the high division factor */
/**************************************/
switch (b_TimingUnit) {
	/* ns */
case 0:
	/* Timer 0 factor */
	ul_HighTimerValue = (unsigned int)
		(ul_HighTiming * (0.00025 * b_ClockSelection));
	/* Round the value */
	if ((double)((double)ul_HighTiming * (0.00025 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
		ul_HighTimerValue = ul_HighTimerValue + 1;
	}
	/* Calculate the real timing */
	*pul_RealHighTiming = (unsigned int)
		(ul_HighTimerValue / (0.00025 * (double)b_ClockSelection));
	d_RealHighTiming = (double)ul_HighTimerValue /
		(0.00025 * (double)b_ClockSelection);
	if ((double)((double)ul_HighTimerValue / (0.00025 * (double)b_ClockSelection)) >= (double)((double)*pul_RealHighTiming + 0.5)) {
		*pul_RealHighTiming = *pul_RealHighTiming + 1;
	}
	ul_HighTiming = ul_HighTiming - 1;
	ul_HighTimerValue = ul_HighTimerValue - 2;
	if (b_ClockSelection != APCI1710_40MHZ) {
		ul_HighTimerValue = (unsigned int)
			((double)(ul_HighTimerValue) * 1.007752288);
	}
	break;
	/* µs */
case 1:
	/* Timer 0 factor */
	ul_HighTimerValue = (unsigned int)
		(ul_HighTiming * (0.25 * b_ClockSelection));
	/* Round the value */
	if ((double)((double)ul_HighTiming * (0.25 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
		ul_HighTimerValue = ul_HighTimerValue + 1;
	}
	/* Calculate the real timing */
	*pul_RealHighTiming = (unsigned int)
		(ul_HighTimerValue / (0.25 * (double)b_ClockSelection));
	d_RealHighTiming = (double)ul_HighTimerValue /
		((double)0.25 * (double)b_ClockSelection);
	if ((double)((double)ul_HighTimerValue / (0.25 * (double)b_ClockSelection)) >= (double)((double)*pul_RealHighTiming + 0.5)) {
		*pul_RealHighTiming = *pul_RealHighTiming + 1;
	}
	ul_HighTiming = ul_HighTiming - 1;
	ul_HighTimerValue = ul_HighTimerValue - 2;
	if (b_ClockSelection != APCI1710_40MHZ) {
		ul_HighTimerValue = (unsigned int)
			((double)(ul_HighTimerValue) * 1.007752288);
	}
	break;
	/* ms */
case 2:
	/* Timer 0 factor */
	ul_HighTimerValue = ul_HighTiming * (250.0 * b_ClockSelection);
	/* Round the value */
	if ((double)((double)ul_HighTiming * (250.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
		ul_HighTimerValue = ul_HighTimerValue + 1;
	}
	/* Calculate the real timing */
	*pul_RealHighTiming = (unsigned int)
		(ul_HighTimerValue / (250.0 * (double)b_ClockSelection));
	d_RealHighTiming = (double)ul_HighTimerValue /
		(250.0 * (double)b_ClockSelection);
	if ((double)((double)ul_HighTimerValue / (250.0 * (double)b_ClockSelection)) >= (double)((double)*pul_RealHighTiming + 0.5)) {
		*pul_RealHighTiming = *pul_RealHighTiming + 1;
	}
	ul_HighTiming = ul_HighTiming - 1;
	ul_HighTimerValue = ul_HighTimerValue - 2;
	if (b_ClockSelection != APCI1710_40MHZ) {
		ul_HighTimerValue = (unsigned int)
			((double)(ul_HighTimerValue) * 1.007752288);
	}
	break;
	/* s */
case 3:
	/* Timer 0 factor */
	ul_HighTimerValue = (unsigned int)
		(ul_HighTiming * (250000.0 * b_ClockSelection));
	/* Round the value */
	if ((double)((double)ul_HighTiming * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
		ul_HighTimerValue = ul_HighTimerValue + 1;
	}
	/* Calculate the real timing */
	*pul_RealHighTiming = (unsigned int)
		(ul_HighTimerValue / (250000.0 * (double)b_ClockSelection));
	d_RealHighTiming = (double)ul_HighTimerValue /
		(250000.0 * (double)b_ClockSelection);
	if ((double)((double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) >= (double)((double)*pul_RealHighTiming + 0.5)) {
		*pul_RealHighTiming = *pul_RealHighTiming + 1;
	}
	ul_HighTiming = ul_HighTiming - 1;
	ul_HighTimerValue = ul_HighTimerValue - 2;
	if (b_ClockSelection != APCI1710_40MHZ) {
		ul_HighTimerValue = (unsigned int)
			((double)(ul_HighTimerValue) * 1.007752288);
	}
	break;
	/* mn */
case 4:
	/* Timer 0 factor */
	ul_HighTimerValue = (unsigned int)
		((ul_HighTiming * 60) * (250000.0 * b_ClockSelection));
	/* Round the value */
	if ((double)((double)(ul_HighTiming * 60.0) * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
		ul_HighTimerValue = ul_HighTimerValue + 1;
	}
	/* Calculate the real timing */
	*pul_RealHighTiming = (unsigned int)
		(ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) / 60;
	d_RealHighTiming = ((double)ul_HighTimerValue /
		(250000.0 * (double)b_ClockSelection)) / 60.0;
	if ((double)(((double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0) >= (double)((double)*pul_RealHighTiming + 0.5)) {
		*pul_RealHighTiming = *pul_RealHighTiming + 1;
	}
	ul_HighTiming = ul_HighTiming - 1;
	ul_HighTimerValue = ul_HighTimerValue - 2;
	if (b_ClockSelection != APCI1710_40MHZ) {
		ul_HighTimerValue = (unsigned int)
			((double)(ul_HighTimerValue) * 1.007752288);
	}
	break;
}
fpu_end();
/* Save the clock selection */
devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.b_ClockSelection =
	b_ClockSelection;
/* Save the timing unit */
devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].
	b_TimingUnit = b_TimingUnit;
/* Save the low base timing */
devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].
	d_LowTiming = d_RealLowTiming;
devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].
	ul_RealLowTiming = *pul_RealLowTiming;
/* Save the high base timing */
devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].
	d_HighTiming = d_RealHighTiming;
devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].
	ul_RealHighTiming = *pul_RealHighTiming;
/* Write the low timing */
outl(ul_LowTimerValue, devpriv->s_BoardInfos.ui_Address + 0 + (20 * b_PWM) + (64 * b_ModulNbr));
/* Write the high timing */
outl(ul_HighTimerValue, devpriv->s_BoardInfos.ui_Address + 4 + (20 * b_PWM) + (64 * b_ModulNbr));
/* Set the clock selection */
dw_Command = inl(devpriv->s_BoardInfos.ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr));
dw_Command = dw_Command & 0x7F;
if (b_ClockSelection == APCI1710_40MHZ) {
	dw_Command = dw_Command | 0x80;
}
/* Set the clock selection */
outl(dw_Command, devpriv->s_BoardInfos.ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr));
/* PWM init. */
devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].
	b_PWMInit = 1;
} else {
	/***************************************************/
	/* You cannot use the 40MHz clock selection with   */
	/* this board                                      */
	/***************************************************/
	DPRINTK("You cannot use the 40MHz clock selection with this board\n");
	i_ReturnValue = -9;
}
} else {
/***************************************/
/* High base timing selection is wrong */
/***************************************/
DPRINTK("High base timing selection is wrong\n");
i_ReturnValue =
-8;
}
} else {
/**************************************/
/* Low base timing selection is wrong */
/**************************************/
DPRINTK("Low base timing selection is wrong\n");
i_ReturnValue = -7;
}
} /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
else {
/**********************************/
/* Timing unit selection is wrong */
/**********************************/
DPRINTK("Timing unit selection is wrong\n");
i_ReturnValue = -6;
} /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
} /* if ((b_ClockSelection == APCI1710_30MHZ) || (b_ClockSelection == APCI1710_33MHZ) || (b_ClockSelection == APCI1710_40MHZ)) */
else {
/*******************************/
/* The selected clock is wrong */
/*******************************/
DPRINTK("The selected clock is wrong\n");
i_ReturnValue = -5;
} /* if ((b_ClockSelection == APCI1710_30MHZ) || (b_ClockSelection == APCI1710_33MHZ) || (b_ClockSelection == APCI1710_40MHZ)) */
} /* if (b_PWM >= 0 && b_PWM <= 1) */
else {
/******************************/
/* The PWM selection is wrong */
/******************************/
DPRINTK("The PWM selection is wrong\n");
i_ReturnValue = -4;
} /* if (b_PWM >= 0 && b_PWM <= 1) */
} else {
/**********************************/
/* The module is not a PWM module */
/**********************************/
DPRINTK("The module is not a PWM module\n");
i_ReturnValue = -3;
}
} else {
/***********************/
/* Module number error */
/***********************/
DPRINTK("Module number error\n");
i_ReturnValue = -2;
}
return i_ReturnValue;
}
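/*
 * Worked example (editorial, derived from the arithmetic above): with
 * b_ClockSelection == APCI1710_33MHZ, b_TimingUnit == 1 (µs) and
 * ul_LowTiming == 100, i_APCI1710_InitPWM computes
 *
 *   ul_LowTimerValue   = 100 * (0.25 * 33)        = 825 ticks
 *   *pul_RealLowTiming = 825 / (0.25 * 33)        = 100 µs
 *   register value     = (825 - 2) * 1.007752288  = 829
 *
 * i.e. the register receives the tick count minus two, with the fixed
 * correction factor applied for the 30/33 MHz PCI clocks.
 */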
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_GetPWMInitialisation |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_PWM, |
| unsigned char *_ pb_TimingUnit, |
| PULONG_ pul_LowTiming, |
| PULONG_ pul_HighTiming, |
| unsigned char *_ pb_StartLevel, |
| unsigned char *_ pb_StopMode, |
| unsigned char *_ pb_StopLevel, |
| unsigned char *_ pb_ExternGate, |
| unsigned char *_ pb_InterruptEnable, |
| unsigned char *_ pb_Enable) |
+----------------------------------------------------------------------------+
| Task : Return the PWM (b_PWM) initialisation from selected |
| module (b_ModulNbr). You must call the |
| "i_APCI1710_InitPWM" function before you call this |
| function. |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
| unsigned char_ b_PWM : Selected PWM (0 or 1) |
+----------------------------------------------------------------------------+
| Output Parameters : unsigned char *_ pb_TimingUnit : Base timing Unit (0 to 4) |
| 0 : ns |
| 1 : µs |
| 2 : ms |
| 3 : s |
| 4 : mn |
| PULONG_ pul_LowTiming : Low base timing value. |
| PULONG_ pul_HighTiming : High base timing value. |
| unsigned char *_ pb_StartLevel : Start period level |
| selection |
| 0 : The period starts |
| with a low level |
| 1 : The period starts |
| with a high level|
| unsigned char *_ pb_StopMode : Stop mode selection |
| 0 : The PWM is stopped |
| directly after the |
| "i_APCI1710_DisablePWM"|
| function and breaks the|
| last period |
| 1 : After the |
| "i_APCI1710_DisablePWM"|
| function the PWM is |
| stopped at the end |
| from last period cycle|
| unsigned char *_ pb_StopLevel : Stop PWM level selection |
| 0 : The output signal |
| keeps the level after|
| the |
| "i_APCI1710_DisablePWM"|
| function |
| 1 : The output signal is|
| set to low after the|
| "i_APCI1710_DisablePWM"|
| function |
| 2 : The output signal is|
| set to high after |
| the |
| "i_APCI1710_DisablePWM"|
| function |
| unsigned char *_ pb_ExternGate : Extern gate action |
| selection |
| 0 : Extern gate signal |
| not used. |
| 1 : Extern gate signal |
| used. |
| unsigned char *_ pb_InterruptEnable : Enable or disable the PWM |
| interrupt. |
| - APCI1710_ENABLE : |
| Enable the PWM interrupt|
| An interrupt occurs after |
| each period |
| - APCI1710_DISABLE : |
| Disable the PWM |
| interrupt |
| unsigned char *_ pb_Enable : Indicate if the PWM is |
| enabled or not |
| 0 : PWM not enabled |
| 1 : PWM enabled |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: Module selection wrong |
| -3: The module is not a PWM module |
| -4: PWM selection is wrong |
| -5: PWM not initialised see function |
| "i_APCI1710_InitPWM" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_GetPWMInitialisation(struct comedi_device *dev,
unsigned char b_ModulNbr,
unsigned char b_PWM,
unsigned char *pb_TimingUnit,
unsigned int *pul_LowTiming,
unsigned int *pul_HighTiming,
unsigned char *pb_StartLevel,
unsigned char *pb_StopMode,
unsigned char *pb_StopLevel,
unsigned char *pb_ExternGate, unsigned char *pb_InterruptEnable, unsigned char *pb_Enable)
{
int i_ReturnValue = 0;
unsigned int dw_Status;
unsigned int dw_Command;
/**************************/
/* Test the module number */
/**************************/
if (b_ModulNbr < 4) {
/***************/
/* Test if PWM */
/***************/
if ((devpriv->s_BoardInfos.
dw_MolduleConfiguration[b_ModulNbr] &
0xFFFF0000UL) == APCI1710_PWM) {
/**************************/
/* Test the PWM selection */
/**************************/
if (b_PWM <= 1) {
/***************************/
/* Test if PWM initialised */
/***************************/
dw_Status = inl(devpriv->s_BoardInfos.
ui_Address + 12 + (20 * b_PWM) +
(64 * b_ModulNbr));
if (dw_Status & 0x10) {
/***********************/
/* Read the low timing */
/***********************/
*pul_LowTiming =
inl(devpriv->s_BoardInfos.
ui_Address + 0 + (20 * b_PWM) +
(64 * b_ModulNbr));
/************************/
/* Read the high timing */
/************************/
*pul_HighTiming =
inl(devpriv->s_BoardInfos.
ui_Address + 4 + (20 * b_PWM) +
(64 * b_ModulNbr));
/********************/
/* Read the command */
/********************/
dw_Command = inl(devpriv->s_BoardInfos.
ui_Address + 8 + (20 * b_PWM) +
(64 * b_ModulNbr));
*pb_StartLevel =
(unsigned char) ((dw_Command >> 5) & 1);
*pb_StopMode =
(unsigned char) ((dw_Command >> 0) & 1);
*pb_StopLevel =
(unsigned char) ((dw_Command >> 1) & 1);
*pb_ExternGate =
(unsigned char) ((dw_Command >> 4) & 1);
*pb_InterruptEnable =
(unsigned char) ((dw_Command >> 3) & 1);
if (*pb_StopLevel) {
*pb_StopLevel =
*pb_StopLevel +
(unsigned char) ((dw_Command >>
2) & 1);
}
/********************/
/* Read the command */
/********************/
dw_Command = inl(devpriv->s_BoardInfos.
ui_Address + 8 + (20 * b_PWM) +
(64 * b_ModulNbr));
*pb_Enable =
(unsigned char) ((dw_Command >> 0) & 1);
*pb_TimingUnit = devpriv->
s_ModuleInfo[b_ModulNbr].
s_PWMModuleInfo.
s_PWMInfo[b_PWM].b_TimingUnit;
} /* if (dw_Status & 0x10) */
else {
/***********************/
/* PWM not initialised */
/***********************/
DPRINTK("PWM not initialised\n");
i_ReturnValue = -5;
} /* if (dw_Status & 0x10) */
} /* if (b_PWM >= 0 && b_PWM <= 1) */
else {
/******************************/
/* The PWM selection is wrong */
/******************************/
DPRINTK("The PWM selection is wrong\n");
i_ReturnValue = -4;
} /* if (b_PWM >= 0 && b_PWM <= 1) */
} else {
/**********************************/
/* The module is not a PWM module */
/**********************************/
DPRINTK("The module is not a PWM module\n");
i_ReturnValue = -3;
}
} else {
/***********************/
/* Module number error */
/***********************/
DPRINTK("Module number error\n");
i_ReturnValue = -2;
}
return i_ReturnValue;
}
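/*
 * Hypothetical helper defines (not in the original file): the command-
 * register bit layout implied by the shift/mask operations in
 * i_APCI1710_GetPWMInitialisation and i_APCI1710_EnablePWM below.
 */
#if 0
#define APCI1710_PWM_CMD_STOPMODE	(1 << 0)
#define APCI1710_PWM_CMD_STOPLEVEL	(1 << 1)
#define APCI1710_PWM_CMD_STOPHIGH	(1 << 2)
#define APCI1710_PWM_CMD_IRQ_ENA	(1 << 3)
#define APCI1710_PWM_CMD_EXT_GATE	(1 << 4)
#define APCI1710_PWM_CMD_STARTHIGH	(1 << 5)
#define APCI1710_PWM_CMD_CLK_40MHZ	(1 << 7)
#endif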
/*
+----------------------------------------------------------------------------+
| Function Name :INT i_APCI1710_InsnWritePWM(struct comedi_device *dev,
struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : Pwm Enable Disable and Set New Timing |
+----------------------------------------------------------------------------+
| Input Parameters :
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value :
+----------------------------------------------------------------------------+
*/
int i_APCI1710_InsnWritePWM(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
unsigned char b_WriteType;
int i_ReturnValue = 0;
b_WriteType = CR_CHAN(insn->chanspec);
switch (b_WriteType) {
case APCI1710_PWM_ENABLE:
i_ReturnValue = i_APCI1710_EnablePWM(dev,
(unsigned char) CR_AREF(insn->chanspec),
(unsigned char) data[0],
(unsigned char) data[1],
(unsigned char) data[2],
(unsigned char) data[3], (unsigned char) data[4], (unsigned char) data[5]);
break;
case APCI1710_PWM_DISABLE:
i_ReturnValue = i_APCI1710_DisablePWM(dev,
(unsigned char) CR_AREF(insn->chanspec), (unsigned char) data[0]);
break;
case APCI1710_PWM_NEWTIMING:
i_ReturnValue = i_APCI1710_SetNewPWMTiming(dev,
(unsigned char) CR_AREF(insn->chanspec),
(unsigned char) data[0],
(unsigned char) data[1], (unsigned int) data[2], (unsigned int) data[3]);
break;
default:
printk("Write Config Parameter Wrong\n");
}
if (i_ReturnValue >= 0)
i_ReturnValue = insn->n;
return i_ReturnValue;
}
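/*
 * Usage sketch (hypothetical): the data[] layout a caller would use to
 * reach i_APCI1710_EnablePWM through the write handler above; the
 * order follows the argument list in the APCI1710_PWM_ENABLE case.
 */
#if 0
	unsigned int data[6] = {
		0,			/* data[0]: b_PWM */
		1,			/* data[1]: b_StartLevel, start high */
		1,			/* data[2]: b_StopMode, stop at period end */
		1,			/* data[3]: b_StopLevel, stop low */
		0,			/* data[4]: b_ExternGate, not used */
		APCI1710_DISABLE,	/* data[5]: b_InterruptEnable */
	};

	insn.chanspec = CR_PACK(APCI1710_PWM_ENABLE, 0, 0 /* module 0 */);
#endif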
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_EnablePWM |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_PWM, |
| unsigned char_ b_StartLevel, |
| unsigned char_ b_StopMode, |
| unsigned char_ b_StopLevel, |
| unsigned char_ b_ExternGate, |
| unsigned char_ b_InterruptEnable) |
+----------------------------------------------------------------------------+
| Task : Enable the selected PWM (b_PWM) from selected module |
| (b_ModulNbr). You must calling the "i_APCI1710_InitPWM"|
| function be for you call this function. |
| If you enable the PWM interrupt, the PWM generate a |
| interrupt after each period. |
| See function "i_APCI1710_SetBoardIntRoutineX" and the |
| Interrupt mask description chapter. |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Selected module number |
| (0 to 3) |
| unsigned char_ b_PWM : Selected PWM (0 or 1) |
| unsigned char_ b_StartLevel : Start period level selection |
| 0 : The period starts with a |
| low level |
| 1 : The period starts with a |
| high level |
| unsigned char_ b_StopMode : Stop mode selection |
| 0 : The PWM is stopped |
| directly after the |
| "i_APCI1710_DisablePWM" |
| function and breaks the |
| last period |
| 1 : After the |
| "i_APCI1710_DisablePWM" |
| function the PWM is |
| stopped at the end from|
| last period cycle. |
| unsigned char_ b_StopLevel : Stop PWM level selection |
| 0 : The output signal keeps |
| the level after the |
| "i_APCI1710_DisablePWM" |
| function |
| 1 : The output signal is set|
| to low after the |
| "i_APCI1710_DisablePWM" |
| function |
| 2 : The output signal is set|
| to high after the |
| "i_APCI1710_DisablePWM" |
| function |
| unsigned char_ b_ExternGate : Extern gate action selection |
| 0 : Extern gate signal not |
| used. |
| 1 : Extern gate signal used.|
| unsigned char_ b_InterruptEnable : Enable or disable the PWM |
| interrupt. |
| - APCI1710_ENABLE : |
| Enable the PWM interrupt |
| An interrupt occurs after |
| each period |
| - APCI1710_DISABLE : |
| Disable the PWM interrupt |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: Module selection wrong |
| -3: The module is not a PWM module |
| -4: PWM selection is wrong |
| -5: PWM not initialised see function |
| "i_APCI1710_InitPWM" |
| -6: PWM start level selection is wrong |
| -7: PWM stop mode selection is wrong |
| -8: PWM stop level selection is wrong |
| -9: Extern gate signal selection is wrong |
| -10: Interrupt parameter is wrong |
| -11: Interrupt function not initialised. |
| See function "i_APCI1710_SetBoardIntRoutineX" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_EnablePWM(struct comedi_device *dev,
unsigned char b_ModulNbr,
unsigned char b_PWM,
unsigned char b_StartLevel,
unsigned char b_StopMode,
unsigned char b_StopLevel, unsigned char b_ExternGate, unsigned char b_InterruptEnable)
{
int i_ReturnValue = 0;
unsigned int dw_Status;
unsigned int dw_Command;
devpriv->tsk_Current = current; /* Save the current process task structure */
/**************************/
/* Test the module number */
/**************************/
if (b_ModulNbr < 4) {
/***************/
/* Test if PWM */
/***************/
if ((devpriv->s_BoardInfos.
dw_MolduleConfiguration[b_ModulNbr] &
0xFFFF0000UL) == APCI1710_PWM) {
/**************************/
/* Test the PWM selection */
/**************************/
if (b_PWM <= 1) {
/***************************/
/* Test if PWM initialised */
/***************************/
dw_Status = inl(devpriv->s_BoardInfos.
ui_Address + 12 + (20 * b_PWM) +
(64 * b_ModulNbr));
if (dw_Status & 0x10) {
/**********************************/
/* Test the start level selection */
/**********************************/
if (b_StartLevel <= 1) {
/**********************/
/* Test the stop mode */
/**********************/
if (b_StopMode <= 1) {
/***********************/
/* Test the stop level */
/***********************/
if (b_StopLevel <= 2) {
/*****************************/
/* Test the extern gate mode */
/*****************************/
if (b_ExternGate
<= 1) {
/*****************************/
/* Test the interrupt action */
/*****************************/
if (b_InterruptEnable == APCI1710_ENABLE || b_InterruptEnable == APCI1710_DISABLE) {
/******************************************/
/* Test if interrupt function initialised */
/******************************************/
/********************/
/* Read the command */
/********************/
dw_Command = inl(devpriv->s_BoardInfos.ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr));
dw_Command = dw_Command & 0x80;
/********************/
/* Make the command */
/********************/
dw_Command = dw_Command | b_StopMode | (b_InterruptEnable << 3) | (b_ExternGate << 4) | (b_StartLevel << 5);
if (b_StopLevel & 3) {
	dw_Command = dw_Command | 2;
	if (b_StopLevel & 2) {
		dw_Command = dw_Command | 4;
	}
}
devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].
	b_InterruptEnable = b_InterruptEnable;
/*******************/
/* Set the command */
/*******************/
outl(dw_Command, devpriv->s_BoardInfos.ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr));
/******************/
/* Enable the PWM */
/******************/
outl(1, devpriv->s_BoardInfos.ui_Address + 12 + (20 * b_PWM) + (64 * b_ModulNbr));
} /* if (b_InterruptEnable == APCI1710_ENABLE || b_InterruptEnable == APCI1710_DISABLE) */
else {
/********************************/
/* Interrupt parameter is wrong */
/********************************/
DPRINTK("Interrupt parameter is wrong\n");
i_ReturnValue
=
-10;
} /* if (b_InterruptEnable == APCI1710_ENABLE || b_InterruptEnable == APCI1710_DISABLE) */
} /* if (b_ExternGate >= 0 && b_ExternGate <= 1) */
else {
/*****************************************/
/* Extern gate signal selection is wrong */
/*****************************************/
DPRINTK("Extern gate signal selection is wrong\n");
i_ReturnValue
=
-9;
} /* if (b_ExternGate >= 0 && b_ExternGate <= 1) */
} /* if (b_StopLevel >= 0 && b_StopLevel <= 2) */
else {
/*************************************/
/* PWM stop level selection is wrong */
/*************************************/
DPRINTK("PWM stop level selection is wrong\n");
i_ReturnValue =
-8;
} /* if (b_StopLevel >= 0 && b_StopLevel <= 2) */
} /* if (b_StopMode >= 0 && b_StopMode <= 1) */
else {
/************************************/
/* PWM stop mode selection is wrong */
/************************************/
DPRINTK("PWM stop mode selection is wrong\n");
i_ReturnValue = -7;
} /* if (b_StopMode >= 0 && b_StopMode <= 1) */
} /* if (b_StartLevel >= 0 && b_StartLevel <= 1) */
else {
/**************************************/
/* PWM start level selection is wrong */
/**************************************/
DPRINTK("PWM start level selection is wrong\n");
i_ReturnValue = -6;
} /* if (b_StartLevel >= 0 && b_StartLevel <= 1) */
} /* if (dw_Status & 0x10) */
else {
/***********************/
/* PWM not initialised */
/***********************/
DPRINTK("PWM not initialised\n");
i_ReturnValue = -5;
} /* if (dw_Status & 0x10) */
} /* if (b_PWM >= 0 && b_PWM <= 1) */
else {
/******************************/
/* The PWM selection is wrong */
/******************************/
DPRINTK("The PWM selection is wrong\n");
i_ReturnValue = -4;
} /* if (b_PWM >= 0 && b_PWM <= 1) */
} else {
/**********************************/
/* The module is not a PWM module */
/**********************************/
DPRINTK("The module is not a PWM module\n");
i_ReturnValue = -3;
}
} else {
/***********************/
/* Module number error */
/***********************/
DPRINTK("Module number error\n");
i_ReturnValue = -2;
}
return i_ReturnValue;
}
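/*
 * Worked example (editorial): for b_StartLevel = 1, b_StopMode = 1,
 * b_StopLevel = 1, b_ExternGate = 0 and b_InterruptEnable = 0, the
 * command word assembled above is
 *
 *   dw_Command = 1 | (0 << 3) | (0 << 4) | (1 << 5) = 0x21
 *
 * plus bit 1 because (b_StopLevel & 3) is non-zero, giving 0x23; bit 2
 * would be added as well for b_StopLevel == 2 (stop high).
 */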
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_DisablePWM (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_PWM) |
+----------------------------------------------------------------------------+
| Task : Disable the selected PWM (b_PWM) from selected module |
| (b_ModulNbr). The output signal level depends on the |
| initialisation by the "i_APCI1710_EnablePWM". |
| See the b_StartLevel, b_StopMode and b_StopLevel |
| parameters from this function. |
+----------------------------------------------------------------------------+
| Input Parameters :BYTE_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
| unsigned char_ b_PWM : Selected PWM (0 or 1) |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: Module selection wrong |
| -3: The module is not a PWM module |
| -4: PWM selection is wrong |
| -5: PWM not initialised see function |
| "i_APCI1710_InitPWM" |
| -6: PWM not enabled see function |
| "i_APCI1710_EnablePWM" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_DisablePWM(struct comedi_device *dev, unsigned char b_ModulNbr, unsigned char b_PWM)
{
int i_ReturnValue = 0;
unsigned int dw_Status;
/**************************/
/* Test the module number */
/**************************/
if (b_ModulNbr < 4) {
/***************/
/* Test if PWM */
/***************/
if ((devpriv->s_BoardInfos.
dw_MolduleConfiguration[b_ModulNbr] &
0xFFFF0000UL) == APCI1710_PWM) {
/**************************/
/* Test the PWM selection */
/**************************/
if (b_PWM <= 1) {
/***************************/
/* Test if PWM initialised */
/***************************/
dw_Status = inl(devpriv->s_BoardInfos.
ui_Address + 12 + (20 * b_PWM) +
(64 * b_ModulNbr));
if (dw_Status & 0x10) {
/***********************/
/* Test if PWM enabled */
/***********************/
if (dw_Status & 0x1) {
/*******************/
/* Disable the PWM */
/*******************/
outl(0, devpriv->s_BoardInfos.
ui_Address + 12 +
(20 * b_PWM) +
(64 * b_ModulNbr));
} /* if (dw_Status & 0x1) */
else {
/*******************/
/* PWM not enabled */
/*******************/
DPRINTK("PWM not enabled\n");
i_ReturnValue = -6;
} /* if (dw_Status & 0x1) */
} /* if (dw_Status & 0x10) */
else {
/***********************/
/* PWM not initialised */
/***********************/
DPRINTK(" PWM not initialised\n");
i_ReturnValue = -5;
} /* if (dw_Status & 0x10) */
} /* if (b_PWM >= 0 && b_PWM <= 1) */
else {
/******************************/
/* The PWM selection is wrong */
/******************************/
DPRINTK("The PWM selection is wrong\n");
i_ReturnValue = -4;
} /* if (b_PWM >= 0 && b_PWM <= 1) */
} else {
/**********************************/
/* The module is not a PWM module */
/**********************************/
DPRINTK("The module is not a PWM module\n");
i_ReturnValue = -3;
}
} else {
/***********************/
/* Module number error */
/***********************/
DPRINTK("Module number error\n");
i_ReturnValue = -2;
}
return i_ReturnValue;
}
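/*
 * Usage sketch (hypothetical): disabling the PWM through the write
 * handler; only the PWM index travels in data[0].
 */
#if 0
	data[0] = 0;	/* b_PWM */
	insn.chanspec = CR_PACK(APCI1710_PWM_DISABLE, 0, 0 /* module 0 */);
#endif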
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_SetNewPWMTiming |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_PWM, |
| unsigned char_ b_ClockSelection, |
| unsigned char_ b_TimingUnit, |
| ULONG_ ul_LowTiming, |
| ULONG_ ul_HighTiming) |
+----------------------------------------------------------------------------+
| Task : Set a new timing. The ul_LowTiming, ul_HighTiming and |
| ul_TimingUnit determine the low/high timing base for |
| the period. |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure|
| (0 to 3) |
| unsigned char_ b_PWM : Selected PWM (0 or 1). |
| unsigned char_ b_TimingUnit : Base timing Unit (0 to 4) |
| 0 : ns |
| 1 : µs |
| 2 : ms |
| 3 : s |
| 4 : mn |
| ULONG_ ul_LowTiming : Low base timing value. |
| ULONG_ ul_HighTiming : High base timing value. |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: Module selection wrong |
| -3: The module is not a PWM module |
| -4: PWM selection is wrong |
| -5: PWM not initialised |
| -6: Timing Unit selection is wrong |
| -7: Low base timing selection is wrong |
| -8: High base timing selection is wrong |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_SetNewPWMTiming(struct comedi_device *dev,
unsigned char b_ModulNbr,
unsigned char b_PWM, unsigned char b_TimingUnit, unsigned int ul_LowTiming, unsigned int ul_HighTiming)
{
unsigned char b_ClockSelection;
int i_ReturnValue = 0;
unsigned int ul_LowTimerValue = 0;
unsigned int ul_HighTimerValue = 0;
unsigned int ul_RealLowTiming = 0;
unsigned int ul_RealHighTiming = 0;
unsigned int dw_Status;
unsigned int dw_Command;
double d_RealLowTiming = 0;
double d_RealHighTiming = 0;
/**************************/
/* Test the module number */
/**************************/
if (b_ModulNbr < 4) {
/***************/
/* Test if PWM */
/***************/
if ((devpriv->s_BoardInfos.
dw_MolduleConfiguration[b_ModulNbr] &
0xFFFF0000UL) == APCI1710_PWM) {
/**************************/
/* Test the PWM selection */
/**************************/
if (b_PWM <= 1) {
/***************************/
/* Test if PWM initialised */
/***************************/
dw_Status = inl(devpriv->s_BoardInfos.
ui_Address + 12 + (20 * b_PWM) +
(64 * b_ModulNbr));
if (dw_Status & 0x10) {
b_ClockSelection = devpriv->
s_ModuleInfo[b_ModulNbr].
s_PWMModuleInfo.
b_ClockSelection;
/************************/
/* Test the timing unit */
/************************/
if (b_TimingUnit <= 4) {
/*********************************/
/* Test the low timing selection */
/*********************************/
if (((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 0)
	&& (ul_LowTiming >= 266) && (ul_LowTiming <= 0xFFFFFFFFUL))
	|| ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 1)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 571230650UL))
	|| ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 2)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 571230UL))
	|| ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 3)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 571UL))
	|| ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 4)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 9UL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 0)
	&& (ul_LowTiming >= 242) && (ul_LowTiming <= 0xFFFFFFFFUL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 1)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 519691043UL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 2)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 519691UL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 3)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 520UL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 4)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 8UL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 0)
	&& (ul_LowTiming >= 200) && (ul_LowTiming <= 0xFFFFFFFFUL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 1)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 429496729UL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 2)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 429496UL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 3)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 429UL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 4)
	&& (ul_LowTiming >= 1) && (ul_LowTiming <= 7UL))) {
/**********************************/
/* Test the High timing selection */
/**********************************/
if (((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 0)
	&& (ul_HighTiming >= 266) && (ul_HighTiming <= 0xFFFFFFFFUL))
	|| ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 1)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 571230650UL))
	|| ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 2)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 571230UL))
	|| ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 3)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 571UL))
	|| ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 4)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 9UL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 0)
	&& (ul_HighTiming >= 242) && (ul_HighTiming <= 0xFFFFFFFFUL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 1)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 519691043UL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 2)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 519691UL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 3)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 520UL))
	|| ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 4)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 8UL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 0)
	&& (ul_HighTiming >= 200) && (ul_HighTiming <= 0xFFFFFFFFUL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 1)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 429496729UL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 2)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 429496UL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 3)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 429UL))
	|| ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 4)
	&& (ul_HighTiming >= 1) && (ul_HighTiming <= 7UL))) {
/*************************************/
/* Calculate the low division factor */
/*************************************/
fpu_begin();
switch (b_TimingUnit) {
	/* ns */
case 0:
	/* Timer 0 factor */
	ul_LowTimerValue = (unsigned int)
		(ul_LowTiming * (0.00025 * b_ClockSelection));
	/* Round the value */
	if ((double)((double)ul_LowTiming * (0.00025 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
		ul_LowTimerValue = ul_LowTimerValue + 1;
	}
	/* Calculate the real timing */
	ul_RealLowTiming = (unsigned int)
		(ul_LowTimerValue / (0.00025 * (double)b_ClockSelection));
	d_RealLowTiming = (double)ul_LowTimerValue /
		(0.00025 * (double)b_ClockSelection);
	if ((double)((double)ul_LowTimerValue / (0.00025 * (double)b_ClockSelection)) >= (double)((double)ul_RealLowTiming + 0.5)) {
		ul_RealLowTiming = ul_RealLowTiming + 1;
	}
	ul_LowTiming = ul_LowTiming - 1;
	ul_LowTimerValue = ul_LowTimerValue - 2;
	if (b_ClockSelection != APCI1710_40MHZ) {
		ul_LowTimerValue = (unsigned int)
			((double)(ul_LowTimerValue) * 1.007752288);
	}
	break;
	/* µs */
case 1:
	/* Timer 0 factor */
	ul_LowTimerValue = (unsigned int)
		(ul_LowTiming * (0.25 * b_ClockSelection));
	/* Round the value */
	if ((double)((double)ul_LowTiming * (0.25 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
		ul_LowTimerValue = ul_LowTimerValue + 1;
	}
	/* Calculate the real timing */
	ul_RealLowTiming = (unsigned int)
		(ul_LowTimerValue / (0.25 * (double)b_ClockSelection));
	d_RealLowTiming = (double)ul_LowTimerValue /
		((double)0.25 * (double)b_ClockSelection);
	if ((double)((double)ul_LowTimerValue / (0.25 * (double)b_ClockSelection)) >= (double)((double)ul_RealLowTiming + 0.5)) {
		ul_RealLowTiming = ul_RealLowTiming + 1;
	}
	ul_LowTiming = ul_LowTiming - 1;
	ul_LowTimerValue = ul_LowTimerValue - 2;
	if (b_ClockSelection != APCI1710_40MHZ) {
		ul_LowTimerValue = (unsigned int)
			((double)(ul_LowTimerValue) * 1.007752288);
	}
	break;
	/* ms */
case 2:
	/* Timer 0 factor */
	ul_LowTimerValue = ul_LowTiming * (250.0 * b_ClockSelection);
	/* Round the value */
	if ((double)((double)ul_LowTiming * (250.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
		ul_LowTimerValue = ul_LowTimerValue + 1;
	}
	/* Calculate the real timing */
	ul_RealLowTiming = (unsigned int)
		(ul_LowTimerValue / (250.0 * (double)b_ClockSelection));
	d_RealLowTiming = (double)ul_LowTimerValue /
		(250.0 * (double)b_ClockSelection);
	if ((double)((double)ul_LowTimerValue / (250.0 * (double)b_ClockSelection)) >= (double)((double)ul_RealLowTiming + 0.5)) {
		ul_RealLowTiming = ul_RealLowTiming + 1;
	}
	ul_LowTiming = ul_LowTiming - 1;
	ul_LowTimerValue = ul_LowTimerValue - 2;
	if (b_ClockSelection != APCI1710_40MHZ) {
		ul_LowTimerValue = (unsigned int)
			((double)(ul_LowTimerValue) * 1.007752288);
	}
	break;
	/* s */
case 3:
	/* Timer 0 factor */
	ul_LowTimerValue = (unsigned int)
		(ul_LowTiming * (250000.0 * b_ClockSelection));
	/* Round the value */
	if ((double)((double)ul_LowTiming * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
		ul_LowTimerValue = ul_LowTimerValue + 1;
	}
	/* Calculate the real timing */
	ul_RealLowTiming = (unsigned int)
		(ul_LowTimerValue / (250000.0 * (double)b_ClockSelection));
	d_RealLowTiming = (double)ul_LowTimerValue /
		(250000.0 * (double)b_ClockSelection);
	if ((double)((double)ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) >= (double)((double)ul_RealLowTiming + 0.5)) {
		ul_RealLowTiming = ul_RealLowTiming + 1;
	}
	ul_LowTiming = ul_LowTiming - 1;
	ul_LowTimerValue = ul_LowTimerValue - 2;
	if (b_ClockSelection != APCI1710_40MHZ) {
		ul_LowTimerValue = (unsigned int)
			((double)(ul_LowTimerValue) * 1.007752288);
	}
	break;
	/* mn */
case 4:
	/* Timer 0 factor */
	ul_LowTimerValue = (unsigned int)
		((ul_LowTiming * 60) * (250000.0 * b_ClockSelection));
	/* Round the value */
	if ((double)((double)(ul_LowTiming * 60.0) * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
		ul_LowTimerValue = ul_LowTimerValue + 1;
	}
	/* Calculate the real timing */
	ul_RealLowTiming = (unsigned int)
		(ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) / 60;
	d_RealLowTiming = ((double)ul_LowTimerValue /
		(250000.0 * (double)b_ClockSelection)) / 60.0;
	if ((double)(((double)ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0) >= (double)((double)ul_RealLowTiming + 0.5)) {
		ul_RealLowTiming = ul_RealLowTiming + 1;
	}
	ul_LowTiming = ul_LowTiming - 1;
	ul_LowTimerValue = ul_LowTimerValue - 2;
	if (b_ClockSelection != APCI1710_40MHZ) {
		ul_LowTimerValue = (unsigned int)
			((double)(ul_LowTimerValue) * 1.007752288);
	}
	break;
}
/**************************************/
/* Calculate the high division factor */
/**************************************/
switch (b_TimingUnit) {
/******/
/* ns */
/******/
case 0:
/******************/
/* Timer 0 factor */
/******************/
ul_HighTimerValue = (unsigned int)(ul_HighTiming * (0.00025 * b_ClockSelection));
/*******************/
/* Round the value */
/*******************/
if ((double)((double)ul_HighTiming * (0.00025 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
ul_HighTimerValue = ul_HighTimerValue + 1;
}
/*****************************/
/* Calculate the real timing */
/*****************************/
ul_RealHighTiming = (unsigned int)(ul_HighTimerValue / (0.00025 * (double)b_ClockSelection));
d_RealHighTiming = (double)ul_HighTimerValue / (0.00025 * (double)b_ClockSelection);
if ((double)((double)ul_HighTimerValue / (0.00025 * (double)b_ClockSelection)) >= (double)((double)ul_RealHighTiming + 0.5)) {
ul_RealHighTiming = ul_RealHighTiming + 1;
}
ul_HighTiming = ul_HighTiming - 1;
ul_HighTimerValue = ul_HighTimerValue - 2;
if (b_ClockSelection != APCI1710_40MHZ) {
ul_HighTimerValue = (unsigned int)((double)ul_HighTimerValue * 1.007752288);
}
break;
/******/
/* µs */
/******/
case 1:
/******************/
/* Timer 0 factor */
/******************/
ul_HighTimerValue = (unsigned int)(ul_HighTiming * (0.25 * b_ClockSelection));
/*******************/
/* Round the value */
/*******************/
if ((double)((double)ul_HighTiming * (0.25 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
ul_HighTimerValue = ul_HighTimerValue + 1;
}
/*****************************/
/* Calculate the real timing */
/*****************************/
ul_RealHighTiming = (unsigned int)(ul_HighTimerValue / (0.25 * (double)b_ClockSelection));
d_RealHighTiming = (double)ul_HighTimerValue / ((double)0.25 * (double)b_ClockSelection);
if ((double)((double)ul_HighTimerValue / (0.25 * (double)b_ClockSelection)) >= (double)((double)ul_RealHighTiming + 0.5)) {
ul_RealHighTiming = ul_RealHighTiming + 1;
}
ul_HighTiming = ul_HighTiming - 1;
ul_HighTimerValue = ul_HighTimerValue - 2;
if (b_ClockSelection != APCI1710_40MHZ) {
ul_HighTimerValue = (unsigned int)((double)ul_HighTimerValue * 1.007752288);
}
break;
/******/
/* ms */
/******/
case 2:
/******************/
/* Timer 0 factor */
/******************/
ul_HighTimerValue = ul_HighTiming * (250.0 * b_ClockSelection);
/*******************/
/* Round the value */
/*******************/
if ((double)((double)ul_HighTiming * (250.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
ul_HighTimerValue = ul_HighTimerValue + 1;
}
/*****************************/
/* Calculate the real timing */
/*****************************/
ul_RealHighTiming = (unsigned int)(ul_HighTimerValue / (250.0 * (double)b_ClockSelection));
d_RealHighTiming = (double)ul_HighTimerValue / (250.0 * (double)b_ClockSelection);
if ((double)((double)ul_HighTimerValue / (250.0 * (double)b_ClockSelection)) >= (double)((double)ul_RealHighTiming + 0.5)) {
ul_RealHighTiming = ul_RealHighTiming + 1;
}
ul_HighTiming = ul_HighTiming - 1;
ul_HighTimerValue = ul_HighTimerValue - 2;
if (b_ClockSelection != APCI1710_40MHZ) {
ul_HighTimerValue = (unsigned int)((double)ul_HighTimerValue * 1.007752288);
}
break;
/*****/
/* s */
/*****/
case 3:
/******************/
/* Timer 0 factor */
/******************/
ul_HighTimerValue = (unsigned int)(ul_HighTiming * (250000.0 * b_ClockSelection));
/*******************/
/* Round the value */
/*******************/
if ((double)((double)ul_HighTiming * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
ul_HighTimerValue = ul_HighTimerValue + 1;
}
/*****************************/
/* Calculate the real timing */
/*****************************/
ul_RealHighTiming = (unsigned int)(ul_HighTimerValue / (250000.0 * (double)b_ClockSelection));
d_RealHighTiming = (double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection);
if ((double)((double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) >= (double)((double)ul_RealHighTiming + 0.5)) {
ul_RealHighTiming = ul_RealHighTiming + 1;
}
ul_HighTiming = ul_HighTiming - 1;
ul_HighTimerValue = ul_HighTimerValue - 2;
if (b_ClockSelection != APCI1710_40MHZ) {
ul_HighTimerValue = (unsigned int)((double)ul_HighTimerValue * 1.007752288);
}
break;
/******/
/* mn */
/******/
case 4:
/******************/
/* Timer 0 factor */
/******************/
ul_HighTimerValue = (unsigned int)((ul_HighTiming * 60) * (250000.0 * b_ClockSelection));
/*******************/
/* Round the value */
/*******************/
if ((double)((double)(ul_HighTiming * 60.0) * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
ul_HighTimerValue = ul_HighTimerValue + 1;
}
/*****************************/
/* Calculate the real timing */
/*****************************/
ul_RealHighTiming = (unsigned int)(ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) / 60;
d_RealHighTiming = ((double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0;
if ((double)(((double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0) >= (double)((double)ul_RealHighTiming + 0.5)) {
ul_RealHighTiming = ul_RealHighTiming + 1;
}
ul_HighTiming = ul_HighTiming - 1;
ul_HighTimerValue = ul_HighTimerValue - 2;
if (b_ClockSelection != APCI1710_40MHZ) {
ul_HighTimerValue = (unsigned int)((double)ul_HighTimerValue * 1.007752288);
}
break;
}
fpu_end();
/************************/
/* Save the timing unit */
/************************/
devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].b_TimingUnit = b_TimingUnit;
/****************************/
/* Save the low base timing */
/****************************/
devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].d_LowTiming = d_RealLowTiming;
devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].ul_RealLowTiming = ul_RealLowTiming;
/*****************************/
/* Save the high base timing */
/*****************************/
devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].d_HighTiming = d_RealHighTiming;
devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].ul_RealHighTiming = ul_RealHighTiming;
/************************/
/* Write the low timing */
/************************/
outl(ul_LowTimerValue, devpriv->s_BoardInfos.ui_Address + 0 + (20 * b_PWM) + (64 * b_ModulNbr));
/*************************/
/* Write the high timing */
/*************************/
outl(ul_HighTimerValue, devpriv->s_BoardInfos.ui_Address + 4 + (20 * b_PWM) + (64 * b_ModulNbr));
/***************************/
/* Set the clock selection */
/***************************/
dw_Command = inl(devpriv->s_BoardInfos.ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr));
dw_Command = dw_Command & 0x7F;
if (b_ClockSelection == APCI1710_40MHZ) {
dw_Command = dw_Command | 0x80;
}
/***************************/
/* Set the clock selection */
/***************************/
outl(dw_Command, devpriv->s_BoardInfos.ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr));
} else {
/***************************************/
/* High base timing selection is wrong */
/***************************************/
DPRINTK("High base timing selection is wrong\n");
i_ReturnValue =
-8;
}
} else {
/**************************************/
/* Low base timing selection is wrong */
/**************************************/
DPRINTK("Low base timing selection is wrong\n");
i_ReturnValue = -7;
}
} /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
else {
/**********************************/
/* Timing unit selection is wrong */
/**********************************/
DPRINTK("Timing unit selection is wrong\n");
i_ReturnValue = -6;
} /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
} /* if (dw_Status & 0x10) */
else {
/***********************/
/* PWM not initialised */
/***********************/
DPRINTK("PWM not initialised\n");
i_ReturnValue = -5;
} /* if (dw_Status & 0x10) */
} /* if (b_PWM >= 0 && b_PWM <= 1) */
else {
/******************************/
/* Tor PWM selection is wrong */
/******************************/
DPRINTK("Tor PWM selection is wrong\n");
i_ReturnValue = -4;
} /* if (b_PWM >= 0 && b_PWM <= 1) */
} else {
/**********************************/
/* The module is not a PWM module */
/**********************************/
DPRINTK("The module is not a PWM module\n");
i_ReturnValue = -3;
}
} else {
/***********************/
/* Module number error */
/***********************/
DPRINTK("Module number error\n");
i_ReturnValue = -2;
}
return i_ReturnValue;
}
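/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * every branch of the two timing switches above repeats one
 * round-to-nearest pattern: scale the requested timing by a unit factor
 * in floating point, truncate, then add one when the dropped fraction
 * is >= 0.5. A hypothetical helper capturing that pattern follows; the
 * name apci1710_scale_round is an assumption, not a driver symbol.
 */
#if 0
static unsigned int apci1710_scale_round(unsigned int timing, double factor)
{
	unsigned int value = (unsigned int)(timing * factor);
	/* round half up, exactly as the open-coded branches do */
	if ((double)timing * factor >= (double)value + 0.5)
		value = value + 1;
	return value;
}
#endif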
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_GetPWMStatus |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_PWM, |
| unsigned char *_ pb_PWMOutputStatus, |
| unsigned char *_ pb_ExternGateStatus) |
+----------------------------------------------------------------------------+
| Task : Return the status from selected PWM (b_PWM) from |
| selected module (b_ModulNbr). |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_PWM : Selected PWM (0 or 1) |
| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
|     b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);   |
|     b_PWM      = (unsigned char) data[0];                   |
+----------------------------------------------------------------------------+
| Output Parameters : unsigned char *_ pb_PWMOutputStatus : Return the PWM output |
| level status. |
| 0 : The PWM output level|
| is low. |
| 1 : The PWM output level|
| is high. |
| unsigned char *_ pb_ExternGateStatus : Return the extern gate |
| level status. |
| 0 : The extern gate is |
| low. |
| 1 : The extern gate is |
|                                          high.              |
|     pb_PWMOutputStatus  = (unsigned char *) data[0];        |
|     pb_ExternGateStatus = (unsigned char *) data[1];        |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: Module selection wrong |
| -3: The module is not a PWM module |
| -4: PWM selection is wrong |
| -5: PWM not initialised see function |
| "i_APCI1710_InitPWM" |
| -6: PWM not enabled see function "i_APCI1710_EnablePWM"|
+----------------------------------------------------------------------------+
*/
int i_APCI1710_InsnReadGetPWMStatus(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int i_ReturnValue = 0;
unsigned int dw_Status;
unsigned char b_ModulNbr;
unsigned char b_PWM;
unsigned char *pb_PWMOutputStatus;
unsigned char *pb_ExternGateStatus;
i_ReturnValue = insn->n;
b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
b_PWM = (unsigned char) CR_CHAN(insn->chanspec);
pb_PWMOutputStatus = (unsigned char *) &data[0];
pb_ExternGateStatus = (unsigned char *) &data[1];
/**************************/
/* Test the module number */
/**************************/
if (b_ModulNbr < 4) {
/***************/
/* Test if PWM */
/***************/
if ((devpriv->s_BoardInfos.
dw_MolduleConfiguration[b_ModulNbr] &
0xFFFF0000UL) == APCI1710_PWM) {
/**************************/
/* Test the PWM selection */
/**************************/
if (b_PWM <= 1) {
/***************************/
/* Test if PWM initialised */
/***************************/
dw_Status = inl(devpriv->s_BoardInfos.
ui_Address + 12 + (20 * b_PWM) +
(64 * b_ModulNbr));
if (dw_Status & 0x10) {
/***********************/
/* Test if PWM enabled */
/***********************/
if (dw_Status & 0x1) {
*pb_PWMOutputStatus =
(unsigned char) ((dw_Status >> 7)
& 1);
*pb_ExternGateStatus =
(unsigned char) ((dw_Status >> 6)
& 1);
} /* if (dw_Status & 0x1) */
else {
/*******************/
/* PWM not enabled */
/*******************/
DPRINTK("PWM not enabled \n");
i_ReturnValue = -6;
} /* if (dw_Status & 0x1) */
} /* if (dw_Status & 0x10) */
else {
/***********************/
/* PWM not initialised */
/***********************/
DPRINTK("PWM not initialised\n");
i_ReturnValue = -5;
} /* if (dw_Status & 0x10) */
} /* if (b_PWM >= 0 && b_PWM <= 1) */
else {
/******************************/
/* Tor PWM selection is wrong */
/******************************/
DPRINTK("Tor PWM selection is wrong\n");
i_ReturnValue = -4;
} /* if (b_PWM >= 0 && b_PWM <= 1) */
} else {
/**********************************/
/* The module is not a PWM module */
/**********************************/
DPRINTK("The module is not a PWM module\n");
i_ReturnValue = -3;
}
} else {
/***********************/
/* Module number error */
/***********************/
DPRINTK("Module number error\n");
i_ReturnValue = -2;
}
return i_ReturnValue;
}

int i_APCI1710_InsnBitsReadPWMInterrupt(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
{
data[0] = devpriv->s_InterruptParameters.
s_FIFOInterruptParameters[devpriv->
s_InterruptParameters.ui_Read].b_OldModuleMask;
data[1] = devpriv->s_InterruptParameters.
s_FIFOInterruptParameters[devpriv->
s_InterruptParameters.ui_Read].ul_OldInterruptMask;
data[2] = devpriv->s_InterruptParameters.
s_FIFOInterruptParameters[devpriv->
s_InterruptParameters.ui_Read].ul_OldCounterLatchValue;
/***************************/
/* Increment the read FIFO */
/***************************/
devpriv->s_InterruptParameters.ui_Read =
(devpriv->s_InterruptParameters.ui_Read + 1) % APCI1710_SAVE_INTERRUPT;
return insn->n;
}
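/*
 * Editor's note (minimal model, not part of the original driver): the
 * interrupt read above consumes one entry of a ring buffer whose read
 * index wraps with a modulo on APCI1710_SAVE_INTERRUPT. A stand-alone
 * sketch of that wrap, where SAVE_INTERRUPT_SIZE is an illustrative
 * stand-in for the real constant:
 */
#if 0
#define SAVE_INTERRUPT_SIZE 32 /* stand-in for APCI1710_SAVE_INTERRUPT */
static unsigned int fifo_advance(unsigned int read_idx)
{
	/* (SAVE_INTERRUPT_SIZE - 1) + 1 wraps back to 0 */
	return (read_idx + 1) % SAVE_INTERRUPT_SIZE;
}
#endif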
| gpl-2.0 |
devmapal/linux | drivers/misc/mic/cosm_client/cosm_scif_client.c | 133 | 6448 | /*
* Intel MIC Platform Software Stack (MPSS)
*
* Copyright(c) 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Intel MIC COSM Client Driver
*
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include "../cosm/cosm_main.h"
#define COSM_SCIF_MAX_RETRIES 10
#define COSM_HEARTBEAT_SEND_MSEC (COSM_HEARTBEAT_SEND_SEC * MSEC_PER_SEC)
static struct task_struct *client_thread;
static scif_epd_t client_epd;
static struct scif_peer_dev *client_spdev;
/*
* Reboot notifier: receives shutdown status from the OS and communicates it
* back to the COSM process on the host
*/
static int cosm_reboot_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct cosm_msg msg = { .id = COSM_MSG_SHUTDOWN_STATUS };
int rc;
event = (event == SYS_RESTART) ? SYSTEM_RESTART : event;
dev_info(&client_spdev->dev, "%s %d received event %ld\n",
__func__, __LINE__, event);
msg.shutdown_status = event;
rc = scif_send(client_epd, &msg, sizeof(msg), SCIF_SEND_BLOCK);
if (rc < 0)
dev_err(&client_spdev->dev, "%s %d scif_send rc %d\n",
__func__, __LINE__, rc);
return NOTIFY_DONE;
}
static struct notifier_block cosm_reboot = {
.notifier_call = cosm_reboot_event,
};
/* Set system time from timespec value received from the host */
static void cosm_set_time(struct cosm_msg *msg)
{
int rc = do_settimeofday64(&msg->timespec);
if (rc)
dev_err(&client_spdev->dev, "%s: %d settimeofday rc %d\n",
__func__, __LINE__, rc);
}
/* COSM client receive message processing */
static void cosm_client_recv(void)
{
struct cosm_msg msg;
int rc;
while (1) {
rc = scif_recv(client_epd, &msg, sizeof(msg), 0);
if (!rc) {
return;
} else if (rc < 0) {
dev_err(&client_spdev->dev, "%s: %d rc %d\n",
__func__, __LINE__, rc);
return;
}
dev_dbg(&client_spdev->dev, "%s: %d rc %d id 0x%llx\n",
__func__, __LINE__, rc, msg.id);
switch (msg.id) {
case COSM_MSG_SYNC_TIME:
cosm_set_time(&msg);
break;
case COSM_MSG_SHUTDOWN:
orderly_poweroff(true);
break;
default:
dev_err(&client_spdev->dev, "%s: %d unknown id %lld\n",
__func__, __LINE__, msg.id);
break;
}
}
}
/* Initiate connection to the COSM server on the host */
static int cosm_scif_connect(void)
{
struct scif_port_id port_id;
int i, rc;
client_epd = scif_open();
if (!client_epd) {
dev_err(&client_spdev->dev, "%s %d scif_open failed\n",
__func__, __LINE__);
return -ENOMEM;
}
port_id.node = 0;
port_id.port = SCIF_COSM_LISTEN_PORT;
for (i = 0; i < COSM_SCIF_MAX_RETRIES; i++) {
rc = scif_connect(client_epd, &port_id);
if (rc < 0)
msleep(1000);
else
break;
}
if (rc < 0) {
dev_err(&client_spdev->dev, "%s %d scif_connect rc %d\n",
__func__, __LINE__, rc);
scif_close(client_epd);
client_epd = NULL;
}
return rc < 0 ? rc : 0;
}
/* Close host SCIF connection */
static void cosm_scif_connect_exit(void)
{
if (client_epd) {
scif_close(client_epd);
client_epd = NULL;
}
}
/*
* COSM SCIF client thread function: waits for messages from the host and sends
* a heartbeat to the host
*/
static int cosm_scif_client(void *unused)
{
struct cosm_msg msg = { .id = COSM_MSG_HEARTBEAT };
struct scif_pollepd pollepd;
int rc;
allow_signal(SIGKILL);
while (!kthread_should_stop()) {
pollepd.epd = client_epd;
pollepd.events = POLLIN;
rc = scif_poll(&pollepd, 1, COSM_HEARTBEAT_SEND_MSEC);
if (rc < 0) {
if (-EINTR != rc)
dev_err(&client_spdev->dev,
"%s %d scif_poll rc %d\n",
__func__, __LINE__, rc);
continue;
}
if (pollepd.revents & POLLIN)
cosm_client_recv();
msg.id = COSM_MSG_HEARTBEAT;
rc = scif_send(client_epd, &msg, sizeof(msg), SCIF_SEND_BLOCK);
if (rc < 0)
dev_err(&client_spdev->dev, "%s %d scif_send rc %d\n",
__func__, __LINE__, rc);
}
dev_dbg(&client_spdev->dev, "%s %d Client thread stopped\n",
__func__, __LINE__);
return 0;
}
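/*
 * Editor's note: the scif_poll() timeout above doubles as the heartbeat
 * period. Whenever the poll returns without error, on timeout or on
 * traffic, the loop falls through to scif_send(), so a
 * COSM_MSG_HEARTBEAT goes out at least once per COSM_HEARTBEAT_SEND_MSEC
 * in the absence of poll errors.
 */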
static void cosm_scif_probe(struct scif_peer_dev *spdev)
{
int rc;
dev_dbg(&spdev->dev, "%s %d: dnode %d\n",
__func__, __LINE__, spdev->dnode);
/* We are only interested in the host with spdev->dnode == 0 */
if (spdev->dnode)
return;
client_spdev = spdev;
rc = cosm_scif_connect();
if (rc)
goto exit;
rc = register_reboot_notifier(&cosm_reboot);
if (rc) {
dev_err(&spdev->dev,
"reboot notifier registration failed rc %d\n", rc);
goto connect_exit;
}
client_thread = kthread_run(cosm_scif_client, NULL, "cosm_client");
if (IS_ERR(client_thread)) {
rc = PTR_ERR(client_thread);
dev_err(&spdev->dev, "%s %d kthread_run rc %d\n",
__func__, __LINE__, rc);
goto unreg_reboot;
}
return;
unreg_reboot:
unregister_reboot_notifier(&cosm_reboot);
connect_exit:
cosm_scif_connect_exit();
exit:
client_spdev = NULL;
}
static void cosm_scif_remove(struct scif_peer_dev *spdev)
{
int rc;
dev_dbg(&spdev->dev, "%s %d: dnode %d\n",
__func__, __LINE__, spdev->dnode);
if (spdev->dnode)
return;
if (!IS_ERR_OR_NULL(client_thread)) {
rc = send_sig(SIGKILL, client_thread, 0);
if (rc) {
pr_err("%s %d send_sig rc %d\n",
__func__, __LINE__, rc);
return;
}
kthread_stop(client_thread);
}
unregister_reboot_notifier(&cosm_reboot);
cosm_scif_connect_exit();
client_spdev = NULL;
}
static struct scif_client scif_client_cosm = {
.name = KBUILD_MODNAME,
.probe = cosm_scif_probe,
.remove = cosm_scif_remove,
};
static int __init cosm_client_init(void)
{
int rc = scif_client_register(&scif_client_cosm);
if (rc)
pr_err("scif_client_register failed rc %d\n", rc);
return rc;
}
static void __exit cosm_client_exit(void)
{
scif_client_unregister(&scif_client_cosm);
}
module_init(cosm_client_init);
module_exit(cosm_client_exit);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) MIC card OS state management client driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
taozhijiang/linux | drivers/media/dvb-frontends/stv6110x.c | 133 | 11842 | /*
STV6110(A) Silicon tuner driver
Copyright (C) Manu Abraham <abraham.manu@gmail.com>
Copyright (C) ST Microelectronics
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "dvb_frontend.h"
#include "stv6110x_reg.h"
#include "stv6110x.h"
#include "stv6110x_priv.h"
/* Max transfer size done by I2C transfer functions */
#define MAX_XFER_SIZE 64
static unsigned int verbose;
module_param(verbose, int, 0644);
MODULE_PARM_DESC(verbose, "Set Verbosity level");
static int stv6110x_read_reg(struct stv6110x_state *stv6110x, u8 reg, u8 *data)
{
int ret;
const struct stv6110x_config *config = stv6110x->config;
u8 b0[] = { reg };
u8 b1[] = { 0 };
struct i2c_msg msg[] = {
{ .addr = config->addr, .flags = 0, .buf = b0, .len = 1 },
{ .addr = config->addr, .flags = I2C_M_RD, .buf = b1, .len = 1 }
};
ret = i2c_transfer(stv6110x->i2c, msg, 2);
if (ret != 2) {
dprintk(FE_ERROR, 1, "I/O Error");
return -EREMOTEIO;
}
*data = b1[0];
return 0;
}
static int stv6110x_write_regs(struct stv6110x_state *stv6110x, int start, u8 data[], int len)
{
int ret;
const struct stv6110x_config *config = stv6110x->config;
u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg = {
.addr = config->addr,
.flags = 0,
.buf = buf,
.len = len + 1
};
if (1 + len > sizeof(buf)) {
printk(KERN_WARNING
"%s: i2c wr: len=%d is too big!\n",
KBUILD_MODNAME, len);
return -EINVAL;
}
if (start + len > 8)
return -EINVAL;
buf[0] = start;
memcpy(&buf[1], data, len);
ret = i2c_transfer(stv6110x->i2c, &msg, 1);
if (ret != 1) {
dprintk(FE_ERROR, 1, "I/O Error");
return -EREMOTEIO;
}
return 0;
}
static int stv6110x_write_reg(struct stv6110x_state *stv6110x, u8 reg, u8 data)
{
return stv6110x_write_regs(stv6110x, reg, &data, 1);
}
static int stv6110x_init(struct dvb_frontend *fe)
{
struct stv6110x_state *stv6110x = fe->tuner_priv;
int ret;
ret = stv6110x_write_regs(stv6110x, 0, stv6110x->regs,
ARRAY_SIZE(stv6110x->regs));
if (ret < 0) {
dprintk(FE_ERROR, 1, "Initialization failed");
return -1;
}
return 0;
}
static int stv6110x_set_frequency(struct dvb_frontend *fe, u32 frequency)
{
struct stv6110x_state *stv6110x = fe->tuner_priv;
u32 rDiv, divider;
s32 pVal, pCalc, rDivOpt = 0, pCalcOpt = 1000;
u8 i;
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL1], CTRL1_K, (REFCLOCK_MHz - 16));
if (frequency <= 1023000) {
STV6110x_SETFIELD(stv6110x->regs[STV6110x_TNG1], TNG1_DIV4SEL, 1);
STV6110x_SETFIELD(stv6110x->regs[STV6110x_TNG1], TNG1_PRESC32_ON, 0);
pVal = 40;
} else if (frequency <= 1300000) {
STV6110x_SETFIELD(stv6110x->regs[STV6110x_TNG1], TNG1_DIV4SEL, 1);
STV6110x_SETFIELD(stv6110x->regs[STV6110x_TNG1], TNG1_PRESC32_ON, 1);
pVal = 40;
} else if (frequency <= 2046000) {
STV6110x_SETFIELD(stv6110x->regs[STV6110x_TNG1], TNG1_DIV4SEL, 0);
STV6110x_SETFIELD(stv6110x->regs[STV6110x_TNG1], TNG1_PRESC32_ON, 0);
pVal = 20;
} else {
STV6110x_SETFIELD(stv6110x->regs[STV6110x_TNG1], TNG1_DIV4SEL, 0);
STV6110x_SETFIELD(stv6110x->regs[STV6110x_TNG1], TNG1_PRESC32_ON, 1);
pVal = 20;
}
for (rDiv = 0; rDiv <= 3; rDiv++) {
pCalc = (REFCLOCK_kHz / 100) / R_DIV(rDiv);
if ((abs((s32)(pCalc - pVal))) < (abs((s32)(pCalcOpt - pVal))))
rDivOpt = rDiv;
pCalcOpt = (REFCLOCK_kHz / 100) / R_DIV(rDivOpt);
}
divider = (frequency * R_DIV(rDivOpt) * pVal) / REFCLOCK_kHz;
divider = (divider + 5) / 10;
STV6110x_SETFIELD(stv6110x->regs[STV6110x_TNG1], TNG1_R_DIV, rDivOpt);
STV6110x_SETFIELD(stv6110x->regs[STV6110x_TNG1], TNG1_N_DIV_11_8, MSB(divider));
STV6110x_SETFIELD(stv6110x->regs[STV6110x_TNG0], TNG0_N_DIV_7_0, LSB(divider));
/* VCO Auto calibration */
STV6110x_SETFIELD(stv6110x->regs[STV6110x_STAT1], STAT1_CALVCO_STRT, 1);
stv6110x_write_reg(stv6110x, STV6110x_CTRL1, stv6110x->regs[STV6110x_CTRL1]);
stv6110x_write_reg(stv6110x, STV6110x_TNG1, stv6110x->regs[STV6110x_TNG1]);
stv6110x_write_reg(stv6110x, STV6110x_TNG0, stv6110x->regs[STV6110x_TNG0]);
stv6110x_write_reg(stv6110x, STV6110x_STAT1, stv6110x->regs[STV6110x_STAT1]);
for (i = 0; i < TRIALS; i++) {
stv6110x_read_reg(stv6110x, STV6110x_STAT1, &stv6110x->regs[STV6110x_STAT1]);
if (!STV6110x_GETFIELD(STAT1_CALVCO_STRT, stv6110x->regs[STV6110x_STAT1]))
break;
msleep(1);
}
return 0;
}
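/*
 * Editor's note (arithmetic sketch): pVal above is apparently the
 * effective prescaler scaled by ten (40 or 20), so the first divider
 * expression is computed at ten times the final resolution; the
 * "(divider + 5) / 10" step then rounds to the nearest integer. For
 * example, an intermediate divider of 1937, i.e. a true ratio of 193.7,
 * becomes (1937 + 5) / 10 = 194.
 */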
static int stv6110x_get_frequency(struct dvb_frontend *fe, u32 *frequency)
{
struct stv6110x_state *stv6110x = fe->tuner_priv;
stv6110x_read_reg(stv6110x, STV6110x_TNG1, &stv6110x->regs[STV6110x_TNG1]);
stv6110x_read_reg(stv6110x, STV6110x_TNG0, &stv6110x->regs[STV6110x_TNG0]);
*frequency = (MAKEWORD16(STV6110x_GETFIELD(TNG1_N_DIV_11_8, stv6110x->regs[STV6110x_TNG1]),
STV6110x_GETFIELD(TNG0_N_DIV_7_0, stv6110x->regs[STV6110x_TNG0]))) * REFCLOCK_kHz;
*frequency /= (1 << (STV6110x_GETFIELD(TNG1_R_DIV, stv6110x->regs[STV6110x_TNG1]) +
STV6110x_GETFIELD(TNG1_DIV4SEL, stv6110x->regs[STV6110x_TNG1])));
*frequency >>= 2;
return 0;
}
static int stv6110x_set_bandwidth(struct dvb_frontend *fe, u32 bandwidth)
{
struct stv6110x_state *stv6110x = fe->tuner_priv;
u32 halfbw;
u8 i;
halfbw = bandwidth >> 1;
if (halfbw > 36000000)
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL3], CTRL3_CF, 31); /* LPF */
else if (halfbw < 5000000)
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL3], CTRL3_CF, 0); /* LPF */
else
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL3], CTRL3_CF, ((halfbw / 1000000) - 5)); /* LPF */
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL3], CTRL3_RCCLK_OFF, 0x0); /* cal. clk activated */
STV6110x_SETFIELD(stv6110x->regs[STV6110x_STAT1], STAT1_CALRC_STRT, 0x1); /* LPF auto cal */
stv6110x_write_reg(stv6110x, STV6110x_CTRL3, stv6110x->regs[STV6110x_CTRL3]);
stv6110x_write_reg(stv6110x, STV6110x_STAT1, stv6110x->regs[STV6110x_STAT1]);
for (i = 0; i < TRIALS; i++) {
stv6110x_read_reg(stv6110x, STV6110x_STAT1, &stv6110x->regs[STV6110x_STAT1]);
if (!STV6110x_GETFIELD(STAT1_CALRC_STRT, stv6110x->regs[STV6110x_STAT1]))
break;
msleep(1);
}
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL3], CTRL3_RCCLK_OFF, 0x1); /* cal. done */
stv6110x_write_reg(stv6110x, STV6110x_CTRL3, stv6110x->regs[STV6110x_CTRL3]);
return 0;
}
static int stv6110x_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
{
struct stv6110x_state *stv6110x = fe->tuner_priv;
stv6110x_read_reg(stv6110x, STV6110x_CTRL3, &stv6110x->regs[STV6110x_CTRL3]);
*bandwidth = (STV6110x_GETFIELD(CTRL3_CF, stv6110x->regs[STV6110x_CTRL3]) + 5) * 2000000;
return 0;
}
static int stv6110x_set_refclock(struct dvb_frontend *fe, u32 refclock)
{
struct stv6110x_state *stv6110x = fe->tuner_priv;
/* setup divider */
switch (refclock) {
default:
case 1:
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL2], CTRL2_CO_DIV, 0);
break;
case 2:
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL2], CTRL2_CO_DIV, 1);
break;
case 4:
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL2], CTRL2_CO_DIV, 2);
break;
case 8:
case 0:
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL2], CTRL2_CO_DIV, 3);
break;
}
stv6110x_write_reg(stv6110x, STV6110x_CTRL2, stv6110x->regs[STV6110x_CTRL2]);
return 0;
}
static int stv6110x_get_bbgain(struct dvb_frontend *fe, u32 *gain)
{
struct stv6110x_state *stv6110x = fe->tuner_priv;
stv6110x_read_reg(stv6110x, STV6110x_CTRL2, &stv6110x->regs[STV6110x_CTRL2]);
*gain = 2 * STV6110x_GETFIELD(CTRL2_BBGAIN, stv6110x->regs[STV6110x_CTRL2]);
return 0;
}
static int stv6110x_set_bbgain(struct dvb_frontend *fe, u32 gain)
{
struct stv6110x_state *stv6110x = fe->tuner_priv;
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL2], CTRL2_BBGAIN, gain / 2);
stv6110x_write_reg(stv6110x, STV6110x_CTRL2, stv6110x->regs[STV6110x_CTRL2]);
return 0;
}
static int stv6110x_set_mode(struct dvb_frontend *fe, enum tuner_mode mode)
{
struct stv6110x_state *stv6110x = fe->tuner_priv;
int ret;
switch (mode) {
case TUNER_SLEEP:
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL1], CTRL1_SYN, 0);
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL1], CTRL1_RX, 0);
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL1], CTRL1_LPT, 0);
break;
case TUNER_WAKE:
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL1], CTRL1_SYN, 1);
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL1], CTRL1_RX, 1);
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL1], CTRL1_LPT, 1);
break;
}
ret = stv6110x_write_reg(stv6110x, STV6110x_CTRL1, stv6110x->regs[STV6110x_CTRL1]);
if (ret < 0) {
dprintk(FE_ERROR, 1, "I/O Error");
return -EIO;
}
return 0;
}
static int stv6110x_sleep(struct dvb_frontend *fe)
{
if (fe->tuner_priv)
return stv6110x_set_mode(fe, TUNER_SLEEP);
return 0;
}
static int stv6110x_get_status(struct dvb_frontend *fe, u32 *status)
{
struct stv6110x_state *stv6110x = fe->tuner_priv;
stv6110x_read_reg(stv6110x, STV6110x_STAT1, &stv6110x->regs[STV6110x_STAT1]);
if (STV6110x_GETFIELD(STAT1_LOCK, stv6110x->regs[STV6110x_STAT1]))
*status = TUNER_PHASELOCKED;
else
*status = 0;
return 0;
}
static int stv6110x_release(struct dvb_frontend *fe)
{
struct stv6110x_state *stv6110x = fe->tuner_priv;
fe->tuner_priv = NULL;
kfree(stv6110x);
return 0;
}
static struct dvb_tuner_ops stv6110x_ops = {
.info = {
.name = "STV6110(A) Silicon Tuner",
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_step = 0,
},
.release = stv6110x_release
};
static const struct stv6110x_devctl stv6110x_ctl = {
.tuner_init = stv6110x_init,
.tuner_sleep = stv6110x_sleep,
.tuner_set_mode = stv6110x_set_mode,
.tuner_set_frequency = stv6110x_set_frequency,
.tuner_get_frequency = stv6110x_get_frequency,
.tuner_set_bandwidth = stv6110x_set_bandwidth,
.tuner_get_bandwidth = stv6110x_get_bandwidth,
.tuner_set_bbgain = stv6110x_set_bbgain,
.tuner_get_bbgain = stv6110x_get_bbgain,
.tuner_set_refclk = stv6110x_set_refclock,
.tuner_get_status = stv6110x_get_status,
};
const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
const struct stv6110x_config *config,
struct i2c_adapter *i2c)
{
struct stv6110x_state *stv6110x;
u8 default_regs[] = {0x07, 0x11, 0xdc, 0x85, 0x17, 0x01, 0xe6, 0x1e};
stv6110x = kzalloc(sizeof (struct stv6110x_state), GFP_KERNEL);
if (!stv6110x)
return NULL;
stv6110x->i2c = i2c;
stv6110x->config = config;
stv6110x->devctl = &stv6110x_ctl;
memcpy(stv6110x->regs, default_regs, 8);
/* setup divider */
switch (stv6110x->config->clk_div) {
default:
case 1:
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL2], CTRL2_CO_DIV, 0);
break;
case 2:
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL2], CTRL2_CO_DIV, 1);
break;
case 4:
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL2], CTRL2_CO_DIV, 2);
break;
case 8:
case 0:
STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL2], CTRL2_CO_DIV, 3);
break;
}
fe->tuner_priv = stv6110x;
fe->ops.tuner_ops = stv6110x_ops;
printk(KERN_INFO "%s: Attaching STV6110x\n", __func__);
return stv6110x->devctl;
}
EXPORT_SYMBOL(stv6110x_attach);
MODULE_AUTHOR("Manu Abraham");
MODULE_DESCRIPTION("STV6110x Silicon tuner");
MODULE_LICENSE("GPL");
| gpl-2.0 |
zhiyisun/linux | arch/powerpc/platforms/cell/spufs/inode.c | 133 | 18171 |
/*
* SPU file system
*
* (C) Copyright IBM Deutschland Entwicklung GmbH 2005
*
* Author: Arnd Bergmann <arndb@de.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/parser.h>
#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <linux/uaccess.h>
#include "spufs.h"
struct spufs_sb_info {
int debug;
};
static struct kmem_cache *spufs_inode_cache;
char *isolated_loader;
static int isolated_loader_size;
static struct spufs_sb_info *spufs_get_sb_info(struct super_block *sb)
{
return sb->s_fs_info;
}
static struct inode *
spufs_alloc_inode(struct super_block *sb)
{
struct spufs_inode_info *ei;
ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL);
if (!ei)
return NULL;
ei->i_gang = NULL;
ei->i_ctx = NULL;
ei->i_openers = 0;
return &ei->vfs_inode;
}
static void spufs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
}
static void spufs_destroy_inode(struct inode *inode)
{
call_rcu(&inode->i_rcu, spufs_i_callback);
}
static void
spufs_init_once(void *p)
{
struct spufs_inode_info *ei = p;
inode_init_once(&ei->vfs_inode);
}
static struct inode *
spufs_new_inode(struct super_block *sb, umode_t mode)
{
struct inode *inode;
inode = new_inode(sb);
if (!inode)
goto out;
inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
out:
return inode;
}
static int
spufs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
if ((attr->ia_valid & ATTR_SIZE) &&
(attr->ia_size != inode->i_size))
return -EINVAL;
setattr_copy(inode, attr);
mark_inode_dirty(inode);
return 0;
}
static int
spufs_new_file(struct super_block *sb, struct dentry *dentry,
const struct file_operations *fops, umode_t mode,
size_t size, struct spu_context *ctx)
{
static const struct inode_operations spufs_file_iops = {
.setattr = spufs_setattr,
};
struct inode *inode;
int ret;
ret = -ENOSPC;
inode = spufs_new_inode(sb, S_IFREG | mode);
if (!inode)
goto out;
ret = 0;
inode->i_op = &spufs_file_iops;
inode->i_fop = fops;
inode->i_size = size;
inode->i_private = SPUFS_I(inode)->i_ctx = get_spu_context(ctx);
d_add(dentry, inode);
out:
return ret;
}
static void
spufs_evict_inode(struct inode *inode)
{
struct spufs_inode_info *ei = SPUFS_I(inode);
clear_inode(inode);
if (ei->i_ctx)
put_spu_context(ei->i_ctx);
if (ei->i_gang)
put_spu_gang(ei->i_gang);
}
static void spufs_prune_dir(struct dentry *dir)
{
struct dentry *dentry, *tmp;
inode_lock(d_inode(dir));
list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) {
spin_lock(&dentry->d_lock);
if (simple_positive(dentry)) {
dget_dlock(dentry);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
simple_unlink(d_inode(dir), dentry);
/* XXX: what was dcache_lock protecting here? Other
* filesystems (IB, configfs) release dcache_lock
* before unlink */
dput(dentry);
} else {
spin_unlock(&dentry->d_lock);
}
}
shrink_dcache_parent(dir);
inode_unlock(d_inode(dir));
}
/* Caller must hold parent->i_mutex */
static int spufs_rmdir(struct inode *parent, struct dentry *dir)
{
/* remove all entries */
int res;
spufs_prune_dir(dir);
d_drop(dir);
res = simple_rmdir(parent, dir);
/* We have to give up the mm_struct */
spu_forget(SPUFS_I(d_inode(dir))->i_ctx);
return res;
}
static int spufs_fill_dir(struct dentry *dir,
const struct spufs_tree_descr *files, umode_t mode,
struct spu_context *ctx)
{
while (files->name && files->name[0]) {
int ret;
struct dentry *dentry = d_alloc_name(dir, files->name);
if (!dentry)
return -ENOMEM;
ret = spufs_new_file(dir->d_sb, dentry, files->ops,
files->mode & mode, files->size, ctx);
if (ret)
return ret;
files++;
}
return 0;
}
static int spufs_dir_close(struct inode *inode, struct file *file)
{
struct spu_context *ctx;
struct inode *parent;
struct dentry *dir;
int ret;
dir = file->f_path.dentry;
parent = d_inode(dir->d_parent);
ctx = SPUFS_I(d_inode(dir))->i_ctx;
inode_lock_nested(parent, I_MUTEX_PARENT);
ret = spufs_rmdir(parent, dir);
inode_unlock(parent);
WARN_ON(ret);
return dcache_dir_close(inode, file);
}
const struct file_operations spufs_context_fops = {
.open = dcache_dir_open,
.release = spufs_dir_close,
.llseek = dcache_dir_lseek,
.read = generic_read_dir,
.iterate_shared = dcache_readdir,
.fsync = noop_fsync,
};
EXPORT_SYMBOL_GPL(spufs_context_fops);
static int
spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
umode_t mode)
{
int ret;
struct inode *inode;
struct spu_context *ctx;
inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
if (!inode)
return -ENOSPC;
if (dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
inode->i_mode &= S_ISGID;
}
ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
SPUFS_I(inode)->i_ctx = ctx;
if (!ctx) {
iput(inode);
return -ENOSPC;
}
ctx->flags = flags;
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
inode_lock(inode);
dget(dentry);
inc_nlink(dir);
inc_nlink(inode);
d_instantiate(dentry, inode);
if (flags & SPU_CREATE_NOSCHED)
ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
mode, ctx);
else
ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
if (!ret && spufs_get_sb_info(dir->i_sb)->debug)
ret = spufs_fill_dir(dentry, spufs_dir_debug_contents,
mode, ctx);
if (ret)
spufs_rmdir(dir, dentry);
inode_unlock(inode);
return ret;
}
static int spufs_context_open(struct path *path)
{
int ret;
struct file *filp;
ret = get_unused_fd_flags(0);
if (ret < 0)
return ret;
filp = dentry_open(path, O_RDONLY, current_cred());
if (IS_ERR(filp)) {
put_unused_fd(ret);
return PTR_ERR(filp);
}
filp->f_op = &spufs_context_fops;
fd_install(ret, filp);
return ret;
}
static struct spu_context *
spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
struct file *filp)
{
struct spu_context *tmp, *neighbor, *err;
int count, node;
int aff_supp;
aff_supp = !list_empty(&(list_entry(cbe_spu_info[0].spus.next,
struct spu, cbe_list))->aff_list);
if (!aff_supp)
return ERR_PTR(-EINVAL);
if (flags & SPU_CREATE_GANG)
return ERR_PTR(-EINVAL);
if (flags & SPU_CREATE_AFFINITY_MEM &&
gang->aff_ref_ctx &&
gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM)
return ERR_PTR(-EEXIST);
if (gang->aff_flags & AFF_MERGED)
return ERR_PTR(-EBUSY);
neighbor = NULL;
if (flags & SPU_CREATE_AFFINITY_SPU) {
if (!filp || filp->f_op != &spufs_context_fops)
return ERR_PTR(-EINVAL);
neighbor = get_spu_context(
SPUFS_I(file_inode(filp))->i_ctx);
if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) &&
!list_is_last(&neighbor->aff_list, &gang->aff_list_head) &&
!list_entry(neighbor->aff_list.next, struct spu_context,
aff_list)->aff_head) {
err = ERR_PTR(-EEXIST);
goto out_put_neighbor;
}
if (gang != neighbor->gang) {
err = ERR_PTR(-EINVAL);
goto out_put_neighbor;
}
count = 1;
list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
count++;
if (list_empty(&neighbor->aff_list))
count++;
for (node = 0; node < MAX_NUMNODES; node++) {
if ((cbe_spu_info[node].n_spus - atomic_read(
&cbe_spu_info[node].reserved_spus)) >= count)
break;
}
if (node == MAX_NUMNODES) {
err = ERR_PTR(-EEXIST);
goto out_put_neighbor;
}
}
return neighbor;
out_put_neighbor:
put_spu_context(neighbor);
return err;
}
static void
spufs_set_affinity(unsigned int flags, struct spu_context *ctx,
struct spu_context *neighbor)
{
if (flags & SPU_CREATE_AFFINITY_MEM)
ctx->gang->aff_ref_ctx = ctx;
if (flags & SPU_CREATE_AFFINITY_SPU) {
if (list_empty(&neighbor->aff_list)) {
list_add_tail(&neighbor->aff_list,
&ctx->gang->aff_list_head);
neighbor->aff_head = 1;
}
if (list_is_last(&neighbor->aff_list, &ctx->gang->aff_list_head)
|| list_entry(neighbor->aff_list.next, struct spu_context,
aff_list)->aff_head) {
list_add(&ctx->aff_list, &neighbor->aff_list);
} else {
list_add_tail(&ctx->aff_list, &neighbor->aff_list);
if (neighbor->aff_head) {
neighbor->aff_head = 0;
ctx->aff_head = 1;
}
}
if (!ctx->gang->aff_ref_ctx)
ctx->gang->aff_ref_ctx = ctx;
}
}
static int
spufs_create_context(struct inode *inode, struct dentry *dentry,
struct vfsmount *mnt, int flags, umode_t mode,
struct file *aff_filp)
{
int ret;
int affinity;
struct spu_gang *gang;
struct spu_context *neighbor;
struct path path = {.mnt = mnt, .dentry = dentry};
if ((flags & SPU_CREATE_NOSCHED) &&
!capable(CAP_SYS_NICE))
return -EPERM;
if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
== SPU_CREATE_ISOLATE)
return -EINVAL;
if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
return -ENODEV;
gang = NULL;
neighbor = NULL;
affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
if (affinity) {
gang = SPUFS_I(inode)->i_gang;
if (!gang)
return -EINVAL;
mutex_lock(&gang->aff_mutex);
neighbor = spufs_assert_affinity(flags, gang, aff_filp);
if (IS_ERR(neighbor)) {
ret = PTR_ERR(neighbor);
goto out_aff_unlock;
}
}
ret = spufs_mkdir(inode, dentry, flags, mode & 0777);
if (ret)
goto out_aff_unlock;
if (affinity) {
spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx,
neighbor);
if (neighbor)
put_spu_context(neighbor);
}
ret = spufs_context_open(&path);
if (ret < 0)
WARN_ON(spufs_rmdir(inode, dentry));
out_aff_unlock:
if (affinity)
mutex_unlock(&gang->aff_mutex);
return ret;
}
static int
spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int ret;
struct inode *inode;
struct spu_gang *gang;
ret = -ENOSPC;
inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
if (!inode)
goto out;
ret = 0;
if (dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
inode->i_mode &= S_ISGID;
}
gang = alloc_spu_gang();
SPUFS_I(inode)->i_ctx = NULL;
SPUFS_I(inode)->i_gang = gang;
if (!gang) {
ret = -ENOMEM;
goto out_iput;
}
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
d_instantiate(dentry, inode);
inc_nlink(dir);
inc_nlink(d_inode(dentry));
return ret;
out_iput:
iput(inode);
out:
return ret;
}
static int spufs_gang_open(struct path *path)
{
int ret;
struct file *filp;
ret = get_unused_fd_flags(0);
if (ret < 0)
return ret;
/*
* get references for dget and mntget, will be released
* in error path of *_open().
*/
filp = dentry_open(path, O_RDONLY, current_cred());
if (IS_ERR(filp)) {
put_unused_fd(ret);
return PTR_ERR(filp);
}
filp->f_op = &simple_dir_operations;
fd_install(ret, filp);
return ret;
}
static int spufs_create_gang(struct inode *inode,
struct dentry *dentry,
struct vfsmount *mnt, umode_t mode)
{
struct path path = {.mnt = mnt, .dentry = dentry};
int ret;
ret = spufs_mkgang(inode, dentry, mode & 0777);
if (!ret) {
ret = spufs_gang_open(&path);
if (ret < 0) {
int err = simple_rmdir(inode, dentry);
WARN_ON(err);
}
}
return ret;
}
static struct file_system_type spufs_type;
long spufs_create(struct path *path, struct dentry *dentry,
unsigned int flags, umode_t mode, struct file *filp)
{
struct inode *dir = d_inode(path->dentry);
int ret;
/* check if we are on spufs */
if (path->dentry->d_sb->s_type != &spufs_type)
return -EINVAL;
/* don't accept undefined flags */
if (flags & (~SPU_CREATE_FLAG_ALL))
return -EINVAL;
/* only threads can be underneath a gang */
if (path->dentry != path->dentry->d_sb->s_root)
if ((flags & SPU_CREATE_GANG) || !SPUFS_I(dir)->i_gang)
return -EINVAL;
mode &= ~current_umask();
if (flags & SPU_CREATE_GANG)
ret = spufs_create_gang(dir, dentry, path->mnt, mode);
else
ret = spufs_create_context(dir, dentry, path->mnt, flags, mode,
filp);
if (ret >= 0)
fsnotify_mkdir(dir, dentry);
return ret;
}
/* File system initialization */
enum {
Opt_uid, Opt_gid, Opt_mode, Opt_debug, Opt_err,
};
static const match_table_t spufs_tokens = {
{ Opt_uid, "uid=%d" },
{ Opt_gid, "gid=%d" },
{ Opt_mode, "mode=%o" },
{ Opt_debug, "debug" },
{ Opt_err, NULL },
};
static int spufs_show_options(struct seq_file *m, struct dentry *root)
{
struct spufs_sb_info *sbi = spufs_get_sb_info(root->d_sb);
struct inode *inode = root->d_inode;
if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID))
seq_printf(m, ",uid=%u",
from_kuid_munged(&init_user_ns, inode->i_uid));
if (!gid_eq(inode->i_gid, GLOBAL_ROOT_GID))
seq_printf(m, ",gid=%u",
from_kgid_munged(&init_user_ns, inode->i_gid));
if ((inode->i_mode & S_IALLUGO) != 0775)
seq_printf(m, ",mode=%o", inode->i_mode);
if (sbi->debug)
seq_puts(m, ",debug");
return 0;
}
static int
spufs_parse_options(struct super_block *sb, char *options, struct inode *root)
{
char *p;
substring_t args[MAX_OPT_ARGS];
while ((p = strsep(&options, ",")) != NULL) {
int token, option;
if (!*p)
continue;
token = match_token(p, spufs_tokens, args);
switch (token) {
case Opt_uid:
if (match_int(&args[0], &option))
return 0;
root->i_uid = make_kuid(current_user_ns(), option);
if (!uid_valid(root->i_uid))
return 0;
break;
case Opt_gid:
if (match_int(&args[0], &option))
return 0;
root->i_gid = make_kgid(current_user_ns(), option);
if (!gid_valid(root->i_gid))
return 0;
break;
case Opt_mode:
if (match_octal(&args[0], &option))
return 0;
root->i_mode = option | S_IFDIR;
break;
case Opt_debug:
spufs_get_sb_info(sb)->debug = 1;
break;
default:
return 0;
}
}
return 1;
}
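/*
 * Editor's note (usage sketch): the option table above corresponds to a
 * mount invocation such as
 *
 *	mount -t spufs -o uid=100,gid=100,mode=0775,debug none /spu
 *
 * where the /spu mount point and the numeric ids are illustrative only.
 * Any unknown option or malformed value makes this parser return 0,
 * which spufs_create_root() turns into -EINVAL and a failed mount.
 */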
static void spufs_exit_isolated_loader(void)
{
free_pages((unsigned long) isolated_loader,
get_order(isolated_loader_size));
}
static void
spufs_init_isolated_loader(void)
{
struct device_node *dn;
const char *loader;
int size;
dn = of_find_node_by_path("/spu-isolation");
if (!dn)
return;
loader = of_get_property(dn, "loader", &size);
if (!loader)
return;
/* the loader must be align on a 16 byte boundary */
isolated_loader = (char *)__get_free_pages(GFP_KERNEL, get_order(size));
if (!isolated_loader)
return;
isolated_loader_size = size;
memcpy(isolated_loader, loader, size);
printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
}
static int
spufs_create_root(struct super_block *sb, void *data)
{
struct inode *inode;
int ret;
ret = -ENODEV;
if (!spu_management_ops)
goto out;
ret = -ENOMEM;
inode = spufs_new_inode(sb, S_IFDIR | 0775);
if (!inode)
goto out;
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
SPUFS_I(inode)->i_ctx = NULL;
inc_nlink(inode);
ret = -EINVAL;
if (!spufs_parse_options(sb, data, inode))
goto out_iput;
ret = -ENOMEM;
sb->s_root = d_make_root(inode);
if (!sb->s_root)
goto out;
return 0;
out_iput:
iput(inode);
out:
return ret;
}
static int
spufs_fill_super(struct super_block *sb, void *data, int silent)
{
struct spufs_sb_info *info;
static const struct super_operations s_ops = {
.alloc_inode = spufs_alloc_inode,
.destroy_inode = spufs_destroy_inode,
.statfs = simple_statfs,
.evict_inode = spufs_evict_inode,
.show_options = spufs_show_options,
};
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = SPUFS_MAGIC;
sb->s_op = &s_ops;
sb->s_fs_info = info;
return spufs_create_root(sb, data);
}
static struct dentry *
spufs_mount(struct file_system_type *fstype, int flags,
const char *name, void *data)
{
return mount_single(fstype, flags, data, spufs_fill_super);
}
static struct file_system_type spufs_type = {
.owner = THIS_MODULE,
.name = "spufs",
.mount = spufs_mount,
.kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("spufs");
static int __init spufs_init(void)
{
int ret;
ret = -ENODEV;
if (!spu_management_ops)
goto out;
ret = -ENOMEM;
spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
sizeof(struct spufs_inode_info), 0,
SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, spufs_init_once);
if (!spufs_inode_cache)
goto out;
ret = spu_sched_init();
if (ret)
goto out_cache;
ret = register_spu_syscalls(&spufs_calls);
if (ret)
goto out_sched;
ret = register_filesystem(&spufs_type);
if (ret)
goto out_syscalls;
spufs_init_isolated_loader();
return 0;
out_syscalls:
unregister_spu_syscalls(&spufs_calls);
out_sched:
spu_sched_exit();
out_cache:
kmem_cache_destroy(spufs_inode_cache);
out:
return ret;
}
module_init(spufs_init);
static void __exit spufs_exit(void)
{
spu_sched_exit();
spufs_exit_isolated_loader();
unregister_spu_syscalls(&spufs_calls);
unregister_filesystem(&spufs_type);
kmem_cache_destroy(spufs_inode_cache);
}
module_exit(spufs_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
| gpl-2.0 |
iamroot9C-arm/linux | drivers/media/dvb/frontends/af9033.c | 133 | 20368 | /*
* Afatech AF9033 demodulator driver
*
* Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
* Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "af9033_priv.h"
struct af9033_state {
struct i2c_adapter *i2c;
struct dvb_frontend fe;
struct af9033_config cfg;
u32 bandwidth_hz;
bool ts_mode_parallel;
bool ts_mode_serial;
u32 ber;
u32 ucb;
unsigned long last_stat_check;
};
/* write multiple registers */
static int af9033_wr_regs(struct af9033_state *state, u32 reg, const u8 *val,
int len)
{
int ret;
u8 buf[3 + len];
struct i2c_msg msg[1] = {
{
.addr = state->cfg.i2c_addr,
.flags = 0,
.len = sizeof(buf),
.buf = buf,
}
};
buf[0] = (reg >> 16) & 0xff;
buf[1] = (reg >> 8) & 0xff;
buf[2] = (reg >> 0) & 0xff;
memcpy(&buf[3], val, len);
ret = i2c_transfer(state->i2c, msg, 1);
if (ret == 1) {
ret = 0;
} else {
printk(KERN_WARNING "%s: i2c wr failed=%d reg=%06x len=%d\n",
__func__, ret, reg, len);
ret = -EREMOTEIO;
}
return ret;
}
/* read multiple registers */
static int af9033_rd_regs(struct af9033_state *state, u32 reg, u8 *val, int len)
{
int ret;
u8 buf[3] = { (reg >> 16) & 0xff, (reg >> 8) & 0xff,
(reg >> 0) & 0xff };
struct i2c_msg msg[2] = {
{
.addr = state->cfg.i2c_addr,
.flags = 0,
.len = sizeof(buf),
.buf = buf
}, {
.addr = state->cfg.i2c_addr,
.flags = I2C_M_RD,
.len = len,
.buf = val
}
};
ret = i2c_transfer(state->i2c, msg, 2);
if (ret == 2) {
ret = 0;
} else {
printk(KERN_WARNING "%s: i2c rd failed=%d reg=%06x len=%d\n",
__func__, ret, reg, len);
ret = -EREMOTEIO;
}
return ret;
}
/* write single register */
static int af9033_wr_reg(struct af9033_state *state, u32 reg, u8 val)
{
return af9033_wr_regs(state, reg, &val, 1);
}
/* read single register */
static int af9033_rd_reg(struct af9033_state *state, u32 reg, u8 *val)
{
return af9033_rd_regs(state, reg, val, 1);
}
/* write single register with mask */
static int af9033_wr_reg_mask(struct af9033_state *state, u32 reg, u8 val,
u8 mask)
{
int ret;
u8 tmp;
/* no need for read if whole reg is written */
if (mask != 0xff) {
ret = af9033_rd_regs(state, reg, &tmp, 1);
if (ret)
return ret;
val &= mask;
tmp &= ~mask;
val |= tmp;
}
return af9033_wr_regs(state, reg, &val, 1);
}
/* read single register with mask */
static int af9033_rd_reg_mask(struct af9033_state *state, u32 reg, u8 *val,
u8 mask)
{
int ret, i;
u8 tmp;
ret = af9033_rd_regs(state, reg, &tmp, 1);
if (ret)
return ret;
tmp &= mask;
/* find position of the first bit */
for (i = 0; i < 8; i++) {
if ((mask >> i) & 0x01)
break;
}
*val = tmp >> i;
return 0;
}
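/*
 * Editor's note (equivalence sketch, not part of the original driver):
 * the first-set-bit scan above matches the kernel's ffs() semantics
 * (ffs(0x01) == 1), so for a non-zero mask the masked read could be
 * written as
 *
 *	*val = tmp >> (ffs(mask) - 1);
 *
 * The open-coded loop additionally tolerates mask == 0: tmp is already
 * 0 after the AND, so the resulting shift by 8 still yields 0.
 */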
static u32 af9033_div(u32 a, u32 b, u32 x)
{
u32 r = 0, c = 0, i;
pr_debug("%s: a=%d b=%d x=%d\n", __func__, a, b, x);
if (a > b) {
c = a / b;
a = a - c * b;
}
for (i = 0; i < x; i++) {
if (a >= b) {
r += 1;
a -= b;
}
a <<= 1;
r <<= 1;
}
r = (c << (u32)x) + r;
pr_debug("%s: a=%d b=%d x=%d r=%d r=%x\n", __func__, a, b, x, r, r);
return r;
}
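/*
 * Editor's note (worked example, not part of the original driver):
 * af9033_div() returns a/b as a fixed point value with x fractional
 * bits: the integer quotient c contributes (c << x) and each loop pass
 * develops one fractional bit. For an exact division the fraction is
 * zero, e.g.
 *
 *	af9033_div(20000000, 1000000, 19) == (20 << 19) == 0x00a00000
 *
 * since c == 20 and the remainder is already 0 before the loop runs.
 */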
static void af9033_release(struct dvb_frontend *fe)
{
struct af9033_state *state = fe->demodulator_priv;
kfree(state);
}
static int af9033_init(struct dvb_frontend *fe)
{
struct af9033_state *state = fe->demodulator_priv;
int ret, i, len;
const struct reg_val *init;
u8 buf[4];
u32 adc_cw, clock_cw;
struct reg_val_mask tab[] = {
{ 0x80fb24, 0x00, 0x08 },
{ 0x80004c, 0x00, 0xff },
{ 0x00f641, state->cfg.tuner, 0xff },
{ 0x80f5ca, 0x01, 0x01 },
{ 0x80f715, 0x01, 0x01 },
{ 0x00f41f, 0x04, 0x04 },
{ 0x00f41a, 0x01, 0x01 },
{ 0x80f731, 0x00, 0x01 },
{ 0x00d91e, 0x00, 0x01 },
{ 0x00d919, 0x00, 0x01 },
{ 0x80f732, 0x00, 0x01 },
{ 0x00d91f, 0x00, 0x01 },
{ 0x00d91a, 0x00, 0x01 },
{ 0x80f730, 0x00, 0x01 },
{ 0x80f778, 0x00, 0xff },
{ 0x80f73c, 0x01, 0x01 },
{ 0x80f776, 0x00, 0x01 },
{ 0x00d8fd, 0x01, 0xff },
{ 0x00d830, 0x01, 0xff },
{ 0x00d831, 0x00, 0xff },
{ 0x00d832, 0x00, 0xff },
{ 0x80f985, state->ts_mode_serial, 0x01 },
{ 0x80f986, state->ts_mode_parallel, 0x01 },
{ 0x00d827, 0x00, 0xff },
{ 0x00d829, 0x00, 0xff },
};
/* program clock control */
clock_cw = af9033_div(state->cfg.clock, 1000000ul, 19ul);
buf[0] = (clock_cw >> 0) & 0xff;
buf[1] = (clock_cw >> 8) & 0xff;
buf[2] = (clock_cw >> 16) & 0xff;
buf[3] = (clock_cw >> 24) & 0xff;
pr_debug("%s: clock=%d clock_cw=%08x\n", __func__, state->cfg.clock,
clock_cw);
ret = af9033_wr_regs(state, 0x800025, buf, 4);
if (ret < 0)
goto err;
/* program ADC control */
for (i = 0; i < ARRAY_SIZE(clock_adc_lut); i++) {
if (clock_adc_lut[i].clock == state->cfg.clock)
break;
}
adc_cw = af9033_div(clock_adc_lut[i].adc, 1000000ul, 19ul);
buf[0] = (adc_cw >> 0) & 0xff;
buf[1] = (adc_cw >> 8) & 0xff;
buf[2] = (adc_cw >> 16) & 0xff;
pr_debug("%s: adc=%d adc_cw=%06x\n", __func__, clock_adc_lut[i].adc,
adc_cw);
ret = af9033_wr_regs(state, 0x80f1cd, buf, 3);
if (ret < 0)
goto err;
/* program register table */
for (i = 0; i < ARRAY_SIZE(tab); i++) {
ret = af9033_wr_reg_mask(state, tab[i].reg, tab[i].val,
tab[i].mask);
if (ret < 0)
goto err;
}
/* settings for TS interface */
if (state->cfg.ts_mode == AF9033_TS_MODE_USB) {
ret = af9033_wr_reg_mask(state, 0x80f9a5, 0x00, 0x01);
if (ret < 0)
goto err;
ret = af9033_wr_reg_mask(state, 0x80f9b5, 0x01, 0x01);
if (ret < 0)
goto err;
} else {
ret = af9033_wr_reg_mask(state, 0x80f990, 0x00, 0x01);
if (ret < 0)
goto err;
ret = af9033_wr_reg_mask(state, 0x80f9b5, 0x00, 0x01);
if (ret < 0)
goto err;
}
/* load OFSM settings */
pr_debug("%s: load ofsm settings\n", __func__);
len = ARRAY_SIZE(ofsm_init);
init = ofsm_init;
for (i = 0; i < len; i++) {
ret = af9033_wr_reg(state, init[i].reg, init[i].val);
if (ret < 0)
goto err;
}
/* load tuner specific settings */
pr_debug("%s: load tuner specific settings\n",
__func__);
switch (state->cfg.tuner) {
case AF9033_TUNER_TUA9001:
len = ARRAY_SIZE(tuner_init_tua9001);
init = tuner_init_tua9001;
break;
case AF9033_TUNER_FC0011:
len = ARRAY_SIZE(tuner_init_fc0011);
init = tuner_init_fc0011;
break;
case AF9033_TUNER_MXL5007T:
len = ARRAY_SIZE(tuner_init_mxl5007t);
init = tuner_init_mxl5007t;
break;
case AF9033_TUNER_TDA18218:
len = ARRAY_SIZE(tuner_init_tda18218);
init = tuner_init_tda18218;
break;
default:
pr_debug("%s: unsupported tuner ID=%d\n", __func__,
state->cfg.tuner);
ret = -ENODEV;
goto err;
}
for (i = 0; i < len; i++) {
ret = af9033_wr_reg(state, init[i].reg, init[i].val);
if (ret < 0)
goto err;
}
state->bandwidth_hz = 0; /* force to program all parameters */
return 0;
err:
pr_debug("%s: failed=%d\n", __func__, ret);
return ret;
}
static int af9033_sleep(struct dvb_frontend *fe)
{
struct af9033_state *state = fe->demodulator_priv;
int ret, i;
u8 tmp;
ret = af9033_wr_reg(state, 0x80004c, 1);
if (ret < 0)
goto err;
ret = af9033_wr_reg(state, 0x800000, 0);
if (ret < 0)
goto err;
for (i = 100, tmp = 1; i && tmp; i--) {
ret = af9033_rd_reg(state, 0x80004c, &tmp);
if (ret < 0)
goto err;
usleep_range(200, 10000);
}
pr_debug("%s: loop=%d\n", __func__, i);
if (i == 0) {
ret = -ETIMEDOUT;
goto err;
}
ret = af9033_wr_reg_mask(state, 0x80fb24, 0x08, 0x08);
if (ret < 0)
goto err;
/* prevent current leak (?) */
if (state->cfg.ts_mode == AF9033_TS_MODE_SERIAL) {
/* enable parallel TS */
ret = af9033_wr_reg_mask(state, 0x00d917, 0x00, 0x01);
if (ret < 0)
goto err;
ret = af9033_wr_reg_mask(state, 0x00d916, 0x01, 0x01);
if (ret < 0)
goto err;
}
return 0;
err:
pr_debug("%s: failed=%d\n", __func__, ret);
return ret;
}
static int af9033_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *fesettings)
{
fesettings->min_delay_ms = 800;
fesettings->step_size = 0;
fesettings->max_drift = 0;
return 0;
}
static int af9033_set_frontend(struct dvb_frontend *fe)
{
struct af9033_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i, spec_inv;
u8 tmp, buf[3], bandwidth_reg_val;
u32 if_frequency, freq_cw, adc_freq;
pr_debug("%s: frequency=%d bandwidth_hz=%d\n", __func__, c->frequency,
c->bandwidth_hz);
/* check bandwidth */
switch (c->bandwidth_hz) {
case 6000000:
bandwidth_reg_val = 0x00;
break;
case 7000000:
bandwidth_reg_val = 0x01;
break;
case 8000000:
bandwidth_reg_val = 0x02;
break;
default:
pr_debug("%s: invalid bandwidth_hz\n", __func__);
ret = -EINVAL;
goto err;
}
/* program tuner */
if (fe->ops.tuner_ops.set_params)
fe->ops.tuner_ops.set_params(fe);
/* program CFOE coefficients */
if (c->bandwidth_hz != state->bandwidth_hz) {
for (i = 0; i < ARRAY_SIZE(coeff_lut); i++) {
if (coeff_lut[i].clock == state->cfg.clock &&
coeff_lut[i].bandwidth_hz == c->bandwidth_hz) {
break;
}
}
/* bail out on a LUT miss instead of indexing past the table */
if (i == ARRAY_SIZE(coeff_lut)) {
ret = -EINVAL;
goto err;
}
ret = af9033_wr_regs(state, 0x800001,
coeff_lut[i].val, sizeof(coeff_lut[i].val));
if (ret < 0)
goto err;
}
/* program frequency control */
if (c->bandwidth_hz != state->bandwidth_hz) {
spec_inv = state->cfg.spec_inv ? -1 : 1;
for (i = 0; i < ARRAY_SIZE(clock_adc_lut); i++) {
if (clock_adc_lut[i].clock == state->cfg.clock)
break;
}
adc_freq = clock_adc_lut[i].adc;
/* get used IF frequency */
if (fe->ops.tuner_ops.get_if_frequency)
fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
else
if_frequency = 0;
while (if_frequency > (adc_freq / 2))
if_frequency -= adc_freq;
if (if_frequency >= 0)
spec_inv *= -1;
else
if_frequency *= -1;
freq_cw = af9033_div(if_frequency, adc_freq, 23ul);
if (spec_inv == -1)
freq_cw *= -1;
/* get ADC multiplier */
ret = af9033_rd_reg(state, 0x800045, &tmp);
if (ret < 0)
goto err;
if (tmp == 1)
freq_cw /= 2;
buf[0] = (freq_cw >> 0) & 0xff;
buf[1] = (freq_cw >> 8) & 0xff;
buf[2] = (freq_cw >> 16) & 0x7f;
ret = af9033_wr_regs(state, 0x800029, buf, 3);
if (ret < 0)
goto err;
state->bandwidth_hz = c->bandwidth_hz;
}
ret = af9033_wr_reg_mask(state, 0x80f904, bandwidth_reg_val, 0x03);
if (ret < 0)
goto err;
ret = af9033_wr_reg(state, 0x800040, 0x00);
if (ret < 0)
goto err;
ret = af9033_wr_reg(state, 0x800047, 0x00);
if (ret < 0)
goto err;
ret = af9033_wr_reg_mask(state, 0x80f999, 0x00, 0x01);
if (ret < 0)
goto err;
if (c->frequency <= 230000000)
tmp = 0x00; /* VHF */
else
tmp = 0x01; /* UHF */
ret = af9033_wr_reg(state, 0x80004b, tmp);
if (ret < 0)
goto err;
ret = af9033_wr_reg(state, 0x800000, 0x00);
if (ret < 0)
goto err;
return 0;
err:
pr_debug("%s: failed=%d\n", __func__, ret);
return ret;
}
static int af9033_get_frontend(struct dvb_frontend *fe)
{
struct af9033_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
u8 buf[8];
pr_debug("%s\n", __func__);
/* read all needed registers */
ret = af9033_rd_regs(state, 0x80f900, buf, sizeof(buf));
if (ret < 0)
goto err;
switch ((buf[0] >> 0) & 3) {
case 0:
c->transmission_mode = TRANSMISSION_MODE_2K;
break;
case 1:
c->transmission_mode = TRANSMISSION_MODE_8K;
break;
}
switch ((buf[1] >> 0) & 3) {
case 0:
c->guard_interval = GUARD_INTERVAL_1_32;
break;
case 1:
c->guard_interval = GUARD_INTERVAL_1_16;
break;
case 2:
c->guard_interval = GUARD_INTERVAL_1_8;
break;
case 3:
c->guard_interval = GUARD_INTERVAL_1_4;
break;
}
switch ((buf[2] >> 0) & 7) {
case 0:
c->hierarchy = HIERARCHY_NONE;
break;
case 1:
c->hierarchy = HIERARCHY_1;
break;
case 2:
c->hierarchy = HIERARCHY_2;
break;
case 3:
c->hierarchy = HIERARCHY_4;
break;
}
switch ((buf[3] >> 0) & 3) {
case 0:
c->modulation = QPSK;
break;
case 1:
c->modulation = QAM_16;
break;
case 2:
c->modulation = QAM_64;
break;
}
switch ((buf[4] >> 0) & 3) {
case 0:
c->bandwidth_hz = 6000000;
break;
case 1:
c->bandwidth_hz = 7000000;
break;
case 2:
c->bandwidth_hz = 8000000;
break;
}
switch ((buf[6] >> 0) & 7) {
case 0:
c->code_rate_HP = FEC_1_2;
break;
case 1:
c->code_rate_HP = FEC_2_3;
break;
case 2:
c->code_rate_HP = FEC_3_4;
break;
case 3:
c->code_rate_HP = FEC_5_6;
break;
case 4:
c->code_rate_HP = FEC_7_8;
break;
case 5:
c->code_rate_HP = FEC_NONE;
break;
}
switch ((buf[7] >> 0) & 7) {
case 0:
c->code_rate_LP = FEC_1_2;
break;
case 1:
c->code_rate_LP = FEC_2_3;
break;
case 2:
c->code_rate_LP = FEC_3_4;
break;
case 3:
c->code_rate_LP = FEC_5_6;
break;
case 4:
c->code_rate_LP = FEC_7_8;
break;
case 5:
c->code_rate_LP = FEC_NONE;
break;
}
return 0;
err:
pr_debug("%s: failed=%d\n", __func__, ret);
return ret;
}
static int af9033_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
struct af9033_state *state = fe->demodulator_priv;
int ret;
u8 tmp;
*status = 0;
/* radio channel status, 0=no result, 1=has signal, 2=no signal */
ret = af9033_rd_reg(state, 0x800047, &tmp);
if (ret < 0)
goto err;
/* has signal */
if (tmp == 0x01)
*status |= FE_HAS_SIGNAL;
if (tmp != 0x02) {
/* TPS lock */
ret = af9033_rd_reg_mask(state, 0x80f5a9, &tmp, 0x01);
if (ret < 0)
goto err;
if (tmp)
*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI;
/* full lock */
ret = af9033_rd_reg_mask(state, 0x80f999, &tmp, 0x01);
if (ret < 0)
goto err;
if (tmp)
*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI | FE_HAS_SYNC |
FE_HAS_LOCK;
}
return 0;
err:
pr_debug("%s: failed=%d\n", __func__, ret);
return ret;
}
static int af9033_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct af9033_state *state = fe->demodulator_priv;
int ret, i, len;
u8 buf[3], tmp;
u32 snr_val;
const struct val_snr *uninitialized_var(snr_lut);
/* read value */
ret = af9033_rd_regs(state, 0x80002c, buf, 3);
if (ret < 0)
goto err;
snr_val = (buf[2] << 16) | (buf[1] << 8) | buf[0];
/* read current modulation */
ret = af9033_rd_reg(state, 0x80f903, &tmp);
if (ret < 0)
goto err;
switch ((tmp >> 0) & 3) {
case 0:
len = ARRAY_SIZE(qpsk_snr_lut);
snr_lut = qpsk_snr_lut;
break;
case 1:
len = ARRAY_SIZE(qam16_snr_lut);
snr_lut = qam16_snr_lut;
break;
case 2:
len = ARRAY_SIZE(qam64_snr_lut);
snr_lut = qam64_snr_lut;
break;
default:
goto err;
}
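/* walk the LUT until the measured value drops below the entry's
* threshold; tmp is left holding the matching SNR in dB */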
for (i = 0; i < len; i++) {
tmp = snr_lut[i].snr;
if (snr_val < snr_lut[i].val)
break;
}
*snr = tmp * 10; /* value in units of 0.1 dB */
return 0;
err:
pr_debug("%s: failed=%d\n", __func__, ret);
return ret;
}
static int af9033_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
struct af9033_state *state = fe->demodulator_priv;
int ret;
u8 strength2;
/* read signal strength on a 0-100 scale */
ret = af9033_rd_reg(state, 0x800048, &strength2);
if (ret < 0)
goto err;
/* scale value to 0x0000-0xffff */
*strength = strength2 * 0xffff / 100;
return 0;
err:
pr_debug("%s: failed=%d\n", __func__, ret);
return ret;
}
static int af9033_update_ch_stat(struct af9033_state *state)
{
int ret = 0;
u32 err_cnt, bit_cnt;
u16 abort_cnt;
u8 buf[7];
/* only update data every half second */
if (time_after(jiffies, state->last_stat_check + msecs_to_jiffies(500))) {
ret = af9033_rd_regs(state, 0x800032, buf, sizeof(buf));
if (ret < 0)
goto err;
/* in 8 byte packets? */
abort_cnt = (buf[1] << 8) + buf[0];
/* in bits */
err_cnt = (buf[4] << 16) + (buf[3] << 8) + buf[2];
/* in 8 byte packets? always(?) 0x2710 = 10000 */
bit_cnt = (buf[6] << 8) + buf[5];
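/* the scaling below maps the error ratio onto the full 32-bit DVB API
* range: ber ~= (err_cnt / bit_cnt) * 2^32, computed in integer
* arithmetic as err_cnt * (0xffffffff / bit_cnt) */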
if (bit_cnt < abort_cnt) {
abort_cnt = 1000;
state->ber = 0xffffffff;
} else {
/* 8 byte packets that have not already been rejected */
bit_cnt -= (u32)abort_cnt;
if (bit_cnt == 0) {
state->ber = 0xffffffff;
} else {
err_cnt -= (u32)abort_cnt * 8 * 8;
bit_cnt *= 8 * 8;
state->ber = err_cnt * (0xffffffff / bit_cnt);
}
}
state->ucb += abort_cnt;
state->last_stat_check = jiffies;
}
return 0;
err:
pr_debug("%s: failed=%d\n", __func__, ret);
return ret;
}
static int af9033_read_ber(struct dvb_frontend *fe, u32 *ber)
{
struct af9033_state *state = fe->demodulator_priv;
int ret;
ret = af9033_update_ch_stat(state);
if (ret < 0)
return ret;
*ber = state->ber;
return 0;
}
static int af9033_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
struct af9033_state *state = fe->demodulator_priv;
int ret;
ret = af9033_update_ch_stat(state);
if (ret < 0)
return ret;
*ucblocks = state->ucb;
return 0;
}
static int af9033_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct af9033_state *state = fe->demodulator_priv;
int ret;
pr_debug("%s: enable=%d\n", __func__, enable);
ret = af9033_wr_reg_mask(state, 0x00fa04, enable, 0x01);
if (ret < 0)
goto err;
return 0;
err:
pr_debug("%s: failed=%d\n", __func__, ret);
return ret;
}
static struct dvb_frontend_ops af9033_ops;
struct dvb_frontend *af9033_attach(const struct af9033_config *config,
struct i2c_adapter *i2c)
{
int ret;
struct af9033_state *state;
u8 buf[8];
pr_debug("%s:\n", __func__);
/* allocate memory for the internal state */
state = kzalloc(sizeof(struct af9033_state), GFP_KERNEL);
if (state == NULL)
goto err;
/* setup the state */
state->i2c = i2c;
memcpy(&state->cfg, config, sizeof(struct af9033_config));
if (state->cfg.clock != 12000000) {
printk(KERN_INFO "af9033: unsupported clock=%d, only " \
"12000000 Hz is supported currently\n",
state->cfg.clock);
goto err;
}
/* firmware version */
ret = af9033_rd_regs(state, 0x0083e9, &buf[0], 4);
if (ret < 0)
goto err;
ret = af9033_rd_regs(state, 0x804191, &buf[4], 4);
if (ret < 0)
goto err;
printk(KERN_INFO "af9033: firmware version: LINK=%d.%d.%d.%d " \
"OFDM=%d.%d.%d.%d\n", buf[0], buf[1], buf[2], buf[3],
buf[4], buf[5], buf[6], buf[7]);
/* configure internal TS mode */
switch (state->cfg.ts_mode) {
case AF9033_TS_MODE_PARALLEL:
state->ts_mode_parallel = true;
break;
case AF9033_TS_MODE_SERIAL:
state->ts_mode_serial = true;
break;
case AF9033_TS_MODE_USB:
/* usb mode for AF9035 */
default:
break;
}
/* create dvb_frontend */
memcpy(&state->fe.ops, &af9033_ops, sizeof(struct dvb_frontend_ops));
state->fe.demodulator_priv = state;
return &state->fe;
err:
kfree(state);
return NULL;
}
EXPORT_SYMBOL(af9033_attach);
static struct dvb_frontend_ops af9033_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Afatech AF9033 (DVB-T)",
.frequency_min = 174000000,
.frequency_max = 862000000,
.frequency_stepsize = 250000,
.frequency_tolerance = 0,
.caps = FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 |
FE_CAN_FEC_7_8 |
FE_CAN_FEC_AUTO |
FE_CAN_QPSK |
FE_CAN_QAM_16 |
FE_CAN_QAM_64 |
FE_CAN_QAM_AUTO |
FE_CAN_TRANSMISSION_MODE_AUTO |
FE_CAN_GUARD_INTERVAL_AUTO |
FE_CAN_HIERARCHY_AUTO |
FE_CAN_RECOVER |
FE_CAN_MUTE_TS
},
.release = af9033_release,
.init = af9033_init,
.sleep = af9033_sleep,
.get_tune_settings = af9033_get_tune_settings,
.set_frontend = af9033_set_frontend,
.get_frontend = af9033_get_frontend,
.read_status = af9033_read_status,
.read_snr = af9033_read_snr,
.read_signal_strength = af9033_read_signal_strength,
.read_ber = af9033_read_ber,
.read_ucblocks = af9033_read_ucblocks,
.i2c_gate_ctrl = af9033_i2c_gate_ctrl,
};
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Afatech AF9033 DVB-T demodulator driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
eousphoros/android_kernel_samsung_noblelte | drivers/staging/android/ion/exynos/exynos_ion_v2.c | 133 | 21678 | #ifdef CONFIG_OF_RESERVED_MEM
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/smc.h>
#include <linux/genalloc.h>
#include <linux/exynos_ion.h>
#include "../ion.h"
#include "../ion_priv.h"
struct ion_device *ion_exynos;
static DEFINE_SPINLOCK(smc_lock);
/* contig heaps start at index 1; index 0 is reserved for the system heap */
static int nr_heaps = 1;
struct exynos_ion_platform_heap {
struct ion_platform_heap heap_data;
struct reserved_mem *rmem;
unsigned int id;
unsigned int compat_ids;
bool secure;
bool reusable;
bool protected;
struct kref secure_ref;
struct device dev;
struct ion_heap *heap;
struct mutex cma_lock;
};
static struct ion_platform_heap ion_noncontig_heap = {
.name = "ion_noncontig_heap",
.type = ION_HEAP_TYPE_SYSTEM,
.id = EXYNOS_ION_HEAP_SYSTEM_ID,
};
static struct exynos_ion_platform_heap plat_heaps[ION_NUM_HEAPS];
static int __find_platform_heap_id(unsigned int heap_id)
{
int i;
for (i = 0; i < nr_heaps; i++) {
if (heap_id == plat_heaps[i].id)
break;
}
if (i == nr_heaps)
return -EINVAL;
return i;
}
static void __ion_secure_protect(struct exynos_ion_platform_heap *pdata)
{
pr_info("%s: enter\n", __func__);
pdata->protected = true;
spin_lock(&smc_lock);
/* passing region info */
BUG_ON(exynos_smc(SMC_DRM_SECMEM_REGION_INFO, pdata->id - 1,
pdata->rmem->base, pdata->rmem->size) != 0);
/* protection */
BUG_ON(exynos_smc(SMC_DRM_SECMEM_REGION_PROT, pdata->id - 1,
SMC_PROTECTION_ENABLE, 0) != 0);
spin_unlock(&smc_lock);
pr_info("%s: protection enabled for heap %s\n", __func__,
pdata->heap->name);
}
int ion_secure_protect(struct ion_heap *heap)
{
struct exynos_ion_platform_heap *pdata;
int id;
id = __find_platform_heap_id(heap->id);
if (id < 0) {
pr_err("%s: invalid heap id(%d) for %s\n", __func__,
heap->id, heap->name);
return -EINVAL;
}
pdata = &plat_heaps[id];
if (!pdata->secure) {
pr_err("%s: heap %s is not secure heap\n", __func__, heap->name);
return -EPERM;
}
if (unlikely(atomic_read(&pdata->secure_ref.refcount) == 0)) {
kref_init(&pdata->secure_ref);
__ion_secure_protect(pdata);
} else {
kref_get(&pdata->secure_ref);
}
return 0;
}
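/*
* Usage note: every successful ion_secure_protect() above must
* eventually be paired with ion_secure_unprotect(); the SMC unprotect
* call below only fires once the last secure_ref reference is dropped.
*/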
static void __ion_secure_unprotect(struct kref *kref)
{
struct exynos_ion_platform_heap *pdata = container_of(kref,
struct exynos_ion_platform_heap, secure_ref);
pr_info("%s: enter\n", __func__);
spin_lock(&smc_lock);
/* unprotection */
BUG_ON(exynos_smc(SMC_DRM_SECMEM_REGION_PROT,
pdata->id - 1, SMC_PROTECTION_DISABLE, 0) != 0);
spin_unlock(&smc_lock);
pdata->protected = false;
pr_info("%s: protection disabled for heap %s\n", __func__,
pdata->heap->name);
}
int ion_secure_unprotect(struct ion_heap *heap)
{
struct exynos_ion_platform_heap *pdata;
int id;
id = __find_platform_heap_id(heap->id);
if (id < 0) {
pr_err("%s: invalid heap id(%d) for %s\n", __func__,
heap->id, heap->name);
return -EINVAL;
}
pdata = &plat_heaps[id];
if (!pdata->secure) {
pr_err("%s: heap %s is not secure heap\n", __func__,
pdata->heap->name);
return -EPERM;
}
kref_put(&pdata->secure_ref, __ion_secure_unprotect);
return 0;
}
bool ion_is_heap_available(struct ion_heap *heap,
unsigned long flags, void *data)
{
struct exynos_ion_platform_heap *pdata;
bool protected = !!(flags & ION_FLAG_PROTECTED);
struct cma_info info;
size_t free_size;
int id;
id = __find_platform_heap_id(heap->id);
if (id < 0) {
pr_err("%s: invalid heap id(%d) for %s\n", __func__,
heap->id, heap->name);
return false;
}
pdata = &plat_heaps[id];
if (protected == pdata->protected)
return true;
if (pdata->reusable) {
if (dma_contiguous_info(&pdata->dev, &info)) {
dev_err(&pdata->dev, "failed to retrieve "
"region information\n");
return false;
}
free_size = info.free;
} else {
if (!data) {
pr_err("%s: gen_pool required\n", __func__);
return false;
}
free_size = gen_pool_avail((struct gen_pool *) data);
}
if (free_size != pdata->rmem->size) {
pr_err("%s: heap %s is now in-use, total=%zd, free=%zd\n",
__func__, pdata->heap->name,
(size_t) pdata->rmem->size, free_size);
return false;
}
return true;
}
int ion_parse_heap_id(unsigned int heap_id_mask, unsigned int flags)
{
unsigned int heap_id = 1;
int i;
pr_debug("%s: heap_id_mask=%#x, flags=%#x\n",
__func__, heap_id_mask, flags);
if (heap_id_mask != EXYNOS_ION_HEAP_EXYNOS_CONTIG_MASK) {
if (heap_id_mask < (1 << ION_NUM_HEAPS)) {
return heap_id_mask;
} else {
pr_err("%s: bad heap id %#x\n", __func__,
heap_id_mask);
return -EINVAL;
}
}
if (flags & EXYNOS_ION_CONTIG_ID_MASK)
heap_id += BITS_PER_INT - __fls(flags & EXYNOS_ION_CONTIG_ID_MASK);
for (i = 1; i < nr_heaps; i++) {
if ((plat_heaps[i].id == heap_id) ||
(plat_heaps[i].compat_ids & (1 << heap_id)))
break;
}
if (i == nr_heaps) {
pr_err("%s: bad heap flags %#x\n", __func__, flags);
return -EINVAL;
}
pr_debug("%s: found new heap id %d for %s\n", __func__,
plat_heaps[i].id - 1, plat_heaps[i].heap_data.name);
return (1 << plat_heaps[i].id);
}
static void exynos_ion_rmem_device_init(struct reserved_mem *rmem,
struct device *dev)
{
/* Nothing to do */
}
static void exynos_ion_rmem_device_release(struct reserved_mem *rmem,
struct device *dev)
{
/* Nothing to do */
}
static const struct reserved_mem_ops exynos_ion_rmem_ops = {
.device_init = exynos_ion_rmem_device_init,
.device_release = exynos_ion_rmem_device_release,
};
static int __init exynos_ion_reserved_mem_setup(struct reserved_mem *rmem)
{
struct exynos_ion_platform_heap *pdata;
struct ion_platform_heap *heap_data;
unsigned long len = 0;
__be32 *prop;
BUG_ON(nr_heaps >= ION_NUM_HEAPS);
pdata = &plat_heaps[nr_heaps];
pdata->secure = !!of_get_flat_dt_prop(rmem->fdt_node, "secure", NULL);
pdata->reusable = !!of_get_flat_dt_prop(rmem->fdt_node, "reusable", NULL);
prop = of_get_flat_dt_prop(rmem->fdt_node, "id", &len);
if (!prop) {
pr_err("%s: no <id> found\n", __func__);
return -EINVAL;
}
len /= sizeof(int);
if (len != 1) {
pr_err("%s: wrong <id> field definition\n", __func__);
return -EINVAL;
}
/*
* id=0: system heap
* id=1 ~: contig heaps
*/
pdata->id = be32_to_cpu(prop[0]) + 1;
if (pdata->id >= ION_NUM_HEAPS) {
pr_err("%s: bad <id> number\n", __func__);
return -EINVAL;
}
prop = of_get_flat_dt_prop(rmem->fdt_node, "compat-id", &len);
if (prop) {
len /= sizeof(int);
while (len > 0) {
pdata->compat_ids |=
(1 << (be32_to_cpu(prop[--len]) + 1));
}
}
rmem->ops = &exynos_ion_rmem_ops;
pdata->rmem = rmem;
heap_data = &pdata->heap_data;
heap_data->id = pdata->id;
heap_data->name = rmem->name;
heap_data->base = rmem->base;
heap_data->size = rmem->size;
prop = of_get_flat_dt_prop(rmem->fdt_node, "alignment", &len);
if (!prop)
heap_data->align = PAGE_SIZE;
else
heap_data->align = be32_to_cpu(prop[0]);
if (pdata->reusable) {
heap_data->type = ION_HEAP_TYPE_DMA;
heap_data->priv = &pdata->dev;
if (dma_declare_contiguous(&pdata->dev, heap_data->size,
heap_data->base, MEMBLOCK_ALLOC_ANYWHERE)) {
pr_err("%s: failed to declare cma region %s\n",
__func__, heap_data->name);
return -EFAULT;
}
pr_info("CMA memory[%d]: %s:%#lx\n", heap_data->id,
heap_data->name, (unsigned long)rmem->size);
} else {
heap_data->type = ION_HEAP_TYPE_CARVEOUT;
heap_data->priv = rmem;
pr_info("Reserved memory[%d]: %s:%#lx\n", heap_data->id,
heap_data->name, (unsigned long)rmem->size);
}
atomic_set(&pdata->secure_ref.refcount, 0);
nr_heaps++;
return 0;
}
#define DECLARE_EXYNOS_ION_RESERVED_REGION(name) \
RESERVEDMEM_OF_DECLARE(name, "exynos5433-ion,"#name, exynos_ion_reserved_mem_setup)
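/*
* For example, DECLARE_EXYNOS_ION_RESERVED_REGION(common) expands to
* RESERVEDMEM_OF_DECLARE(common, "exynos5433-ion,common",
* exynos_ion_reserved_mem_setup), binding the setup routine to
* reserved-memory nodes with that compatible string.
*/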
DECLARE_EXYNOS_ION_RESERVED_REGION(common);
DECLARE_EXYNOS_ION_RESERVED_REGION(mfc_sh);
DECLARE_EXYNOS_ION_RESERVED_REGION(g2d_wfd);
DECLARE_EXYNOS_ION_RESERVED_REGION(video);
DECLARE_EXYNOS_ION_RESERVED_REGION(video_ext);
DECLARE_EXYNOS_ION_RESERVED_REGION(sectbl);
DECLARE_EXYNOS_ION_RESERVED_REGION(mfc_fw);
DECLARE_EXYNOS_ION_RESERVED_REGION(mfc_nfw);
DECLARE_EXYNOS_ION_RESERVED_REGION(secdma);
int ion_exynos_contig_heap_info(int region_id, phys_addr_t *phys, size_t *size)
{
int i;
for (i = 0; i < nr_heaps; i++) {
if (plat_heaps[i].id == region_id + 1) {
if (phys)
*phys = plat_heaps[i].rmem->base;
if (size)
*size = plat_heaps[i].rmem->size;
break;
}
}
return 0;
}
EXPORT_SYMBOL(ion_exynos_contig_heap_info);
#ifdef CONFIG_DMA_CMA
static struct class *ion_cma_class;
static int __init exynos_ion_create_cma_class(void)
{
ion_cma_class = class_create(THIS_MODULE, "ion_cma");
if (IS_ERR(ion_cma_class)) {
pr_err("%s: failed to create 'ion_cma' class - %ld\n",
__func__, PTR_ERR(ion_cma_class));
return PTR_ERR(ion_cma_class);
}
return 0;
}
static ssize_t region_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct exynos_ion_platform_heap *pdata = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%s\n", pdata->heap_data.name);
}
static ssize_t region_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct exynos_ion_platform_heap *pdata = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%d\n", pdata->id);
}
static ssize_t isolated_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cma_info info;
/*
* lock is not required
* because this just shows a snapshot of the information
*/
if (dma_contiguous_info(dev, &info)) {
dev_err(dev, "Failed to retrieve region information\n");
info.isolated = false;
}
return scnprintf(buf, PAGE_SIZE, "%d\n", info.isolated ? 1 : 0);
}
static int exynos_ion_isolate_thread(void *p)
{
if (dma_contiguous_isolate(p) != 0)
dev_err(p, "Failed to isolate\n");
flush_all_cpu_caches();
if (!signal_pending(current))
do_exit(0);
return 0;
}
static ssize_t isolated_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct exynos_ion_platform_heap *pdata = dev_get_drvdata(dev);
struct cma_info info;
long new;
int ret;
if (!dev) {
pr_err("%s: Invalid device info\n", __func__);
return -ENODEV;
}
if (IS_ERR_OR_NULL(pdata)) {
dev_err(dev, "Failed to get ion platform data\n");
return -ENODEV;
}
/**
* 0 : deisolation
* 1 : synchronous isolation
* 2 : asynchronous isolation
* others: error
*/
ret = kstrtol(buf, 0, &new);
if (ret)
return ret;
if ((new > 2) || (new < 0)) /* only 0, 1 and 2 are allowed */
return -EINVAL;
if (dma_contiguous_info(dev, &info)) {
dev_err(dev, "Failed to retrieve region information\n");
return -ENODEV;
}
mutex_lock(&pdata->cma_lock);
if (info.isolated != (new != 0)) {
if (!new) {
dma_contiguous_deisolate(dev);
} else if (new == 1) { /* synchronous isolation */
if (dma_contiguous_isolate(dev) != 0)
dev_err(dev, "Failed to isolate\n");
else
flush_all_cpu_caches();
} else { /* new == 2. asynchronous isolation */
struct task_struct *thr;
struct sched_param param = { .sched_priority = 0 };
thr = kthread_run(exynos_ion_isolate_thread,
dev, "cma_isolation:%s", dev_name(dev));
if (IS_ERR(thr))
dev_err(dev,
"Failed to create isolation thread\n");
else
sched_setscheduler(thr, SCHED_NORMAL, &param);
}
}
mutex_unlock(&pdata->cma_lock);
return count;
}
static struct device_attribute cma_regname_attr = __ATTR_RO(region_name);
static struct device_attribute cma_regid_attr = __ATTR_RO(region_id);
static DEVICE_ATTR(isolated, S_IRUSR | S_IWUSR, isolated_show, isolated_store);
static int __init exynos_ion_create_cma_devices(
struct exynos_ion_platform_heap *pdata)
{
struct device *dev;
int ret;
if (!pdata) {
pr_err("%s: heap_data must be given\n", __func__);
return -EINVAL;
}
dev = device_create(ion_cma_class, NULL, 0, pdata, "ion_%s",
pdata->heap_data.name);
if (IS_ERR(dev)) {
pr_err("%s: failed to create device of %s\n", __func__,
pdata->heap_data.name);
return -EINVAL;
}
dev_dbg(dev, "%s: Registered (region %d)\n", __func__, pdata->id);
dev->cma_area = pdata->dev.cma_area;
ret = device_create_file(dev, &cma_regid_attr);
if (ret)
dev_err(dev, "%s: failed to create %s file (%d)\n",
__func__, cma_regid_attr.attr.name, ret);
ret = device_create_file(dev, &cma_regname_attr);
if (ret)
dev_err(dev, "%s: failed to create %s file (%d)\n",
__func__, cma_regname_attr.attr.name, ret);
ret = device_create_file(dev, &dev_attr_isolated);
if (ret)
dev_err(dev, "%s: failed to create %s file (%d)\n",
__func__, dev_attr_isolated.attr.name, ret);
mutex_init(&pdata->cma_lock);
return 0;
}
#else
static int __init exynos_ion_create_cma_class(void)
{
return 0;
}
static int __init exynos_ion_create_cma_devices(
struct exynos_ion_platform_heap *pdata)
{
pr_err("%s: CMA should be configured for '%s'\n",
__func__, pdata->heap_data.name);
return 0;
}
#endif
static int exynos_ion_populate_heaps(struct platform_device *pdev,
struct ion_device *ion_dev)
{
int i, ret;
plat_heaps[0].reusable = false;
memcpy(&plat_heaps[0].heap_data, &ion_noncontig_heap,
sizeof(struct ion_platform_heap));
for (i = 0; i < nr_heaps; i++) {
plat_heaps[i].heap = ion_heap_create(&plat_heaps[i].heap_data);
if (IS_ERR(plat_heaps[i].heap)) {
pr_err("%s: failed to create heap %s[%d]\n", __func__,
plat_heaps[i].heap_data.name,
plat_heaps[i].id);
ret = PTR_ERR(plat_heaps[i].heap);
goto err;
}
ion_device_add_heap(ion_exynos, plat_heaps[i].heap);
if (plat_heaps[i].reusable)
exynos_ion_create_cma_devices(&plat_heaps[i]);
}
return 0;
err:
while (i-- > 0)
ion_heap_destroy(plat_heaps[i].heap);
return ret;
}
static int __init exynos_ion_probe(struct platform_device *pdev)
{
int ret;
ion_exynos = ion_device_create(NULL);
if (IS_ERR_OR_NULL(ion_exynos)) {
pr_err("%s: failed to create ion device\n", __func__);
/* PTR_ERR() on NULL would return 0, so map NULL to -ENOMEM */
return ion_exynos ? PTR_ERR(ion_exynos) : -ENOMEM;
}
platform_set_drvdata(pdev, ion_exynos);
ret = exynos_ion_create_cma_class();
if (ret)
return ret;
return exynos_ion_populate_heaps(pdev, ion_exynos);
}
static int __devexit exynos_ion_remove(struct platform_device *pdev)
{
struct ion_device *idev = platform_get_drvdata(pdev);
int i;
ion_device_destroy(idev);
for (i = 0; i < nr_heaps; i++)
ion_heap_destroy(plat_heaps[i].heap);
return 0;
}
static struct of_device_id exynos_ion_of_match[] __initconst = {
{ .compatible = "samsung,exynos5430-ion", },
{ },
};
static struct platform_driver exynos_ion_driver __refdata = {
.probe = exynos_ion_probe,
.remove = exynos_ion_remove,
.driver = {
.owner = THIS_MODULE,
.name = "ion-exynos",
.of_match_table = of_match_ptr(exynos_ion_of_match),
}
};
static int __init exynos_ion_init(void)
{
return platform_driver_register(&exynos_ion_driver);
}
subsys_initcall(exynos_ion_init);
#ifdef CONFIG_HIGHMEM
#define exynos_sync_single_for_device(addr, size, dir) dmac_map_area(addr, size, dir)
#define exynos_sync_single_for_cpu(addr, size, dir) dmac_unmap_area(addr, size, dir)
#define exynos_sync_sg_for_device(dev, size, sgl, nents, dir) \
ion_device_sync(ion_exynos, sgl, nents, dir, dmac_map_area, false)
#define exynos_sync_sg_for_cpu(dev, size, sgl, nents, dir) \
ion_device_sync(ion_exynos, sgl, nents, dir, dmac_unmap_area, false)
#define exynos_sync_all flush_all_cpu_caches
#else
static void __exynos_sync_sg_for_device(struct device *dev, size_t size,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
for_each_sg(sgl, sg, nelems, i) {
size_t sg_len = min(size, (size_t)sg->length);
__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
sg_len, dir);
if (size > sg->length)
size -= sg->length;
else
break;
}
}
static void __exynos_sync_sg_for_cpu(struct device *dev, size_t size,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
for_each_sg(sgl, sg, nelems, i) {
size_t sg_len = min(size, (size_t)sg->length);
__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
sg_len, dir);
if (size > sg->length)
size -= sg->length;
else
break;
}
}
#define exynos_sync_single_for_device(addr, size, dir) __dma_map_area(addr, size, dir)
#define exynos_sync_single_for_cpu(addr, size, dir) __dma_unmap_area(addr, size, dir)
#define exynos_sync_sg_for_device(dev, size, sg, nents, dir) \
__exynos_sync_sg_for_device(dev, size, sg, nents, dir)
#define exynos_sync_sg_for_cpu(dev, size, sg, nents, dir) \
__exynos_sync_sg_for_cpu(dev, size, sg, nents, dir)
#define exynos_sync_all flush_all_cpu_caches
#endif
void exynos_ion_sync_dmabuf_for_device(struct device *dev,
struct dma_buf *dmabuf,
size_t size,
enum dma_data_direction dir)
{
struct ion_buffer *buffer = (struct ion_buffer *) dmabuf->priv;
if (IS_ERR_OR_NULL(buffer))
BUG();
if (!ion_buffer_cached(buffer) ||
ion_buffer_fault_user_mappings(buffer))
return;
mutex_lock(&buffer->lock);
pr_debug("%s: syncing for device %s, buffer: %p, size: %zd\n",
__func__, dev ? dev_name(dev) : "null", buffer, size);
trace_ion_sync_start(_RET_IP_, dev, dir, size,
buffer->vaddr, 0, size >= ION_FLUSH_ALL_HIGHLIMIT);
if (size >= ION_FLUSH_ALL_HIGHLIMIT)
exynos_sync_all();
else if (!IS_ERR_OR_NULL(buffer->vaddr))
exynos_sync_single_for_device(buffer->vaddr, size, dir);
else
exynos_sync_sg_for_device(dev, size, buffer->sg_table->sgl,
buffer->sg_table->nents, dir);
trace_ion_sync_end(_RET_IP_, dev, dir, size,
buffer->vaddr, 0, size >= ION_FLUSH_ALL_HIGHLIMIT);
mutex_unlock(&buffer->lock);
}
EXPORT_SYMBOL(exynos_ion_sync_dmabuf_for_device);
void exynos_ion_sync_vaddr_for_device(struct device *dev,
void *vaddr,
size_t size,
off_t offset,
enum dma_data_direction dir)
{
pr_debug("%s: syncing for device %s, vaddr: %p, size: %zd, offset: %ld\n",
__func__, dev ? dev_name(dev) : "null",
vaddr, size, offset);
trace_ion_sync_start(_RET_IP_, dev, dir, size,
vaddr, offset, size >= ION_FLUSH_ALL_HIGHLIMIT);
if (size >= ION_FLUSH_ALL_HIGHLIMIT)
exynos_sync_all();
else if (!IS_ERR_OR_NULL(vaddr))
exynos_sync_single_for_device(vaddr + offset, size, dir);
else
BUG();
trace_ion_sync_end(_RET_IP_, dev, dir, size,
vaddr, offset, size >= ION_FLUSH_ALL_HIGHLIMIT);
}
EXPORT_SYMBOL(exynos_ion_sync_vaddr_for_device);
void exynos_ion_sync_sg_for_device(struct device *dev, size_t size,
struct sg_table *sgt,
enum dma_data_direction dir)
{
trace_ion_sync_start(_RET_IP_, dev, dir, size,
0, 0, size >= ION_FLUSH_ALL_HIGHLIMIT);
if (size >= ION_FLUSH_ALL_HIGHLIMIT)
exynos_sync_all();
else
exynos_sync_sg_for_device(dev, size, sgt->sgl, sgt->nents, dir);
trace_ion_sync_end(_RET_IP_, dev, dir, size,
0, 0, size >= ION_FLUSH_ALL_HIGHLIMIT);
}
EXPORT_SYMBOL(exynos_ion_sync_sg_for_device);
void exynos_ion_sync_dmabuf_for_cpu(struct device *dev,
struct dma_buf *dmabuf,
size_t size,
enum dma_data_direction dir)
{
struct ion_buffer *buffer = (struct ion_buffer *) dmabuf->priv;
if (dir == DMA_TO_DEVICE)
return;
if (IS_ERR_OR_NULL(buffer))
BUG();
if (!ion_buffer_cached(buffer) ||
ion_buffer_fault_user_mappings(buffer))
return;
mutex_lock(&buffer->lock);
pr_debug("%s: syncing for cpu %s, buffer: %p, size: %zd\n",
__func__, dev ? dev_name(dev) : "null", buffer, size);
trace_ion_sync_start(_RET_IP_, dev, dir, size,
buffer->vaddr, 0, size >= ION_FLUSH_ALL_HIGHLIMIT);
if (size >= ION_FLUSH_ALL_HIGHLIMIT)
exynos_sync_all();
else if (!IS_ERR_OR_NULL(buffer->vaddr))
exynos_sync_single_for_cpu(buffer->vaddr, size, dir);
else
exynos_sync_sg_for_cpu(dev, size, buffer->sg_table->sgl,
buffer->sg_table->nents, dir);
trace_ion_sync_end(_RET_IP_, dev, dir, size,
buffer->vaddr, 0, size >= ION_FLUSH_ALL_HIGHLIMIT);
mutex_unlock(&buffer->lock);
}
EXPORT_SYMBOL(exynos_ion_sync_dmabuf_for_cpu);
void exynos_ion_sync_vaddr_for_cpu(struct device *dev,
void *vaddr,
size_t size,
off_t offset,
enum dma_data_direction dir)
{
if (dir == DMA_TO_DEVICE)
return;
pr_debug("%s: syncing for cpu %s, vaddr: %p, size: %zd, offset: %ld\n",
__func__, dev ? dev_name(dev) : "null",
vaddr, size, offset);
trace_ion_sync_start(_RET_IP_, dev, dir, size,
vaddr, offset, size >= ION_FLUSH_ALL_HIGHLIMIT);
if (size >= ION_FLUSH_ALL_HIGHLIMIT)
exynos_sync_all();
else if (!IS_ERR_OR_NULL(vaddr))
exynos_sync_single_for_cpu(vaddr + offset, size, dir);
else
BUG();
trace_ion_sync_end(_RET_IP_, dev, dir, size,
vaddr, offset, size >= ION_FLUSH_ALL_HIGHLIMIT);
}
EXPORT_SYMBOL(exynos_ion_sync_vaddr_for_cpu);
void exynos_ion_sync_sg_for_cpu(struct device *dev, size_t size,
struct sg_table *sgt,
enum dma_data_direction dir)
{
if (dir == DMA_TO_DEVICE)
return;
trace_ion_sync_start(_RET_IP_, dev, dir, size,
0, 0, size >= ION_FLUSH_ALL_HIGHLIMIT);
if (size >= ION_FLUSH_ALL_HIGHLIMIT)
exynos_sync_all();
else
exynos_sync_sg_for_cpu(dev, size, sgt->sgl, sgt->nents, dir);
trace_ion_sync_end(_RET_IP_, dev, dir, size,
0, 0, size >= ION_FLUSH_ALL_HIGHLIMIT);
}
EXPORT_SYMBOL(exynos_ion_sync_sg_for_cpu);
#endif
| gpl-2.0 |
AnneWanne1/Anne | x2/sha2.c | 389 | 15951 | /*
* Copyright 2011 ArtForz
* Copyright 2011-2013 pooler
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version. See COPYING for more details.
*/
#include "../cpuminer-config.h"
#include "../miner.h"
#include <string.h>
#include <stdint.h>
#if defined(__arm__) && defined(__APCS_32__)
#define EXTERN_SHA256
#endif
static const uint32_t sha256_h[8] = {
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
};
static const uint32_t sha256_k[64] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
void sha256_init(uint32_t *state)
{
memcpy(state, sha256_h, 32);
}
/* Elementary functions used by SHA256 */
#define Ch(x, y, z) ((x & (y ^ z)) ^ z)
#define Maj(x, y, z) ((x & (y | z)) | (y & z))
#define ROTR(x, n) ((x >> n) | (x << (32 - n)))
#define S0(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
#define S1(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25))
#define s0(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ (x >> 3))
#define s1(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ (x >> 10))
/* SHA256 round function */
#define RND(a, b, c, d, e, f, g, h, k) \
do { \
t0 = h + S1(e) + Ch(e, f, g) + k; \
t1 = S0(a) + Maj(a, b, c); \
d += t0; \
h = t0 + t1; \
} while (0)
/* Adjusted round function for rotating state */
#define RNDr(S, W, i) \
RND(S[(64 - i) % 8], S[(65 - i) % 8], \
S[(66 - i) % 8], S[(67 - i) % 8], \
S[(68 - i) % 8], S[(69 - i) % 8], \
S[(70 - i) % 8], S[(71 - i) % 8], \
W[i] + sha256_k[i])
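/*
* Illustration: for i = 0 the modular indices select the state words in
* order, so RNDr(S, W, 0) expands to
* RND(S[0], S[1], S[2], S[3], S[4], S[5], S[6], S[7], W[0] + sha256_k[0]);
* each later round shifts the window by one, rotating the working
* variables without any explicit copying.
*/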
#ifndef EXTERN_SHA256
/*
* SHA256 block compression function. The 256-bit state is transformed via
* the 512-bit input block to produce a new state.
*/
void sha256_transform(uint32_t *state, const uint32_t *block, int swap)
{
uint32_t W[64];
uint32_t S[8];
uint32_t t0, t1;
int i;
/* 1. Prepare message schedule W. */
if (swap) {
for (i = 0; i < 16; i++)
W[i] = swab32(block[i]);
} else
memcpy(W, block, 64);
for (i = 16; i < 64; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15];
}
/* 2. Initialize working variables. */
memcpy(S, state, 32);
/* 3. Mix. */
RNDr(S, W, 0);
RNDr(S, W, 1);
RNDr(S, W, 2);
RNDr(S, W, 3);
RNDr(S, W, 4);
RNDr(S, W, 5);
RNDr(S, W, 6);
RNDr(S, W, 7);
RNDr(S, W, 8);
RNDr(S, W, 9);
RNDr(S, W, 10);
RNDr(S, W, 11);
RNDr(S, W, 12);
RNDr(S, W, 13);
RNDr(S, W, 14);
RNDr(S, W, 15);
RNDr(S, W, 16);
RNDr(S, W, 17);
RNDr(S, W, 18);
RNDr(S, W, 19);
RNDr(S, W, 20);
RNDr(S, W, 21);
RNDr(S, W, 22);
RNDr(S, W, 23);
RNDr(S, W, 24);
RNDr(S, W, 25);
RNDr(S, W, 26);
RNDr(S, W, 27);
RNDr(S, W, 28);
RNDr(S, W, 29);
RNDr(S, W, 30);
RNDr(S, W, 31);
RNDr(S, W, 32);
RNDr(S, W, 33);
RNDr(S, W, 34);
RNDr(S, W, 35);
RNDr(S, W, 36);
RNDr(S, W, 37);
RNDr(S, W, 38);
RNDr(S, W, 39);
RNDr(S, W, 40);
RNDr(S, W, 41);
RNDr(S, W, 42);
RNDr(S, W, 43);
RNDr(S, W, 44);
RNDr(S, W, 45);
RNDr(S, W, 46);
RNDr(S, W, 47);
RNDr(S, W, 48);
RNDr(S, W, 49);
RNDr(S, W, 50);
RNDr(S, W, 51);
RNDr(S, W, 52);
RNDr(S, W, 53);
RNDr(S, W, 54);
RNDr(S, W, 55);
RNDr(S, W, 56);
RNDr(S, W, 57);
RNDr(S, W, 58);
RNDr(S, W, 59);
RNDr(S, W, 60);
RNDr(S, W, 61);
RNDr(S, W, 62);
RNDr(S, W, 63);
/* 4. Mix local working variables into global state */
for (i = 0; i < 8; i++)
state[i] += S[i];
}
#endif /* EXTERN_SHA256 */
static const uint32_t sha256d_hash1[16] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x80000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000100
};
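/*
* sha256d_hash1 above is the second message block of the outer SHA-256
* pass: words 0-7 are unused placeholders (the inner 32-byte digest is
* written there instead), word 8 holds the 0x80 padding bit and word 15
* the bit length (0x100 = 256 bits).
*/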
static void sha256d_80_swap(uint32_t *hash, const uint32_t *data)
{
uint32_t S[16];
int i;
sha256_init(S);
sha256_transform(S, data, 0);
sha256_transform(S, data + 16, 0);
memcpy(S + 8, sha256d_hash1 + 8, 32);
sha256_init(hash);
sha256_transform(hash, S, 0);
for (i = 0; i < 8; i++)
hash[i] = swab32(hash[i]);
}
void sha256d(unsigned char *hash, const unsigned char *data, int len)
{
uint32_t S[16], T[16];
int i, r;
sha256_init(S);
for (r = len; r > -9; r -= 64) {
if (r < 64)
memset(T, 0, 64);
memcpy(T, data + len - r, r > 64 ? 64 : (r < 0 ? 0 : r));
if (r >= 0 && r < 64)
((unsigned char *)T)[r] = 0x80;
for (i = 0; i < 16; i++)
T[i] = be32dec(T + i);
if (r < 56)
T[15] = 8 * len;
sha256_transform(S, T, 0);
}
memcpy(S + 8, sha256d_hash1 + 8, 32);
sha256_init(T);
sha256_transform(T, S, 0);
for (i = 0; i < 8; i++)
be32enc((uint32_t *)hash + i, T[i]);
}
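/*
* Minimal usage sketch (hypothetical buffer, assuming the declarations
* above):
*
*	unsigned char digest[32];
*	unsigned char header[80] = { 0 };
*	sha256d(digest, header, sizeof(header));
*
* digest then holds SHA256(SHA256(header)).
*/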
static inline void sha256d_preextend(uint32_t *W)
{
W[16] = s1(W[14]) + W[ 9] + s0(W[ 1]) + W[ 0];
W[17] = s1(W[15]) + W[10] + s0(W[ 2]) + W[ 1];
W[18] = s1(W[16]) + W[11] + W[ 2];
W[19] = s1(W[17]) + W[12] + s0(W[ 4]);
W[20] = W[13] + s0(W[ 5]) + W[ 4];
W[21] = W[14] + s0(W[ 6]) + W[ 5];
W[22] = W[15] + s0(W[ 7]) + W[ 6];
W[23] = W[16] + s0(W[ 8]) + W[ 7];
W[24] = W[17] + s0(W[ 9]) + W[ 8];
W[25] = s0(W[10]) + W[ 9];
W[26] = s0(W[11]) + W[10];
W[27] = s0(W[12]) + W[11];
W[28] = s0(W[13]) + W[12];
W[29] = s0(W[14]) + W[13];
W[30] = s0(W[15]) + W[14];
W[31] = s0(W[16]) + W[15];
}
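/*
* sha256d_preextend() above computes message-schedule words 16-31 of
* the second header block while leaving out every term that depends on
* W[3] (the nonce); sha256d_ms() adds those terms back per attempt, so
* only nonce-dependent work is redone inside the scan loop.
*/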
static inline void sha256d_prehash(uint32_t *S, const uint32_t *W)
{
uint32_t t0, t1;
RNDr(S, W, 0);
RNDr(S, W, 1);
RNDr(S, W, 2);
}
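/*
* sha256d_prehash() above runs rounds 0-2 only: they consume W[0]-W[2],
* which do not depend on the nonce in W[3], so the resulting state can
* be reused across all nonce attempts.
*/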
#ifdef EXTERN_SHA256
void sha256d_ms(uint32_t *hash, uint32_t *W,
const uint32_t *midstate, const uint32_t *prehash);
#else
static inline void sha256d_ms(uint32_t *hash, uint32_t *W,
const uint32_t *midstate, const uint32_t *prehash)
{
uint32_t S[64];
uint32_t t0, t1;
int i;
S[18] = W[18];
S[19] = W[19];
S[20] = W[20];
S[22] = W[22];
S[23] = W[23];
S[24] = W[24];
S[30] = W[30];
S[31] = W[31];
W[18] += s0(W[3]);
W[19] += W[3];
W[20] += s1(W[18]);
W[21] = s1(W[19]);
W[22] += s1(W[20]);
W[23] += s1(W[21]);
W[24] += s1(W[22]);
W[25] = s1(W[23]) + W[18];
W[26] = s1(W[24]) + W[19];
W[27] = s1(W[25]) + W[20];
W[28] = s1(W[26]) + W[21];
W[29] = s1(W[27]) + W[22];
W[30] += s1(W[28]) + W[23];
W[31] += s1(W[29]) + W[24];
for (i = 32; i < 64; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15];
}
memcpy(S, prehash, 32);
RNDr(S, W, 3);
RNDr(S, W, 4);
RNDr(S, W, 5);
RNDr(S, W, 6);
RNDr(S, W, 7);
RNDr(S, W, 8);
RNDr(S, W, 9);
RNDr(S, W, 10);
RNDr(S, W, 11);
RNDr(S, W, 12);
RNDr(S, W, 13);
RNDr(S, W, 14);
RNDr(S, W, 15);
RNDr(S, W, 16);
RNDr(S, W, 17);
RNDr(S, W, 18);
RNDr(S, W, 19);
RNDr(S, W, 20);
RNDr(S, W, 21);
RNDr(S, W, 22);
RNDr(S, W, 23);
RNDr(S, W, 24);
RNDr(S, W, 25);
RNDr(S, W, 26);
RNDr(S, W, 27);
RNDr(S, W, 28);
RNDr(S, W, 29);
RNDr(S, W, 30);
RNDr(S, W, 31);
RNDr(S, W, 32);
RNDr(S, W, 33);
RNDr(S, W, 34);
RNDr(S, W, 35);
RNDr(S, W, 36);
RNDr(S, W, 37);
RNDr(S, W, 38);
RNDr(S, W, 39);
RNDr(S, W, 40);
RNDr(S, W, 41);
RNDr(S, W, 42);
RNDr(S, W, 43);
RNDr(S, W, 44);
RNDr(S, W, 45);
RNDr(S, W, 46);
RNDr(S, W, 47);
RNDr(S, W, 48);
RNDr(S, W, 49);
RNDr(S, W, 50);
RNDr(S, W, 51);
RNDr(S, W, 52);
RNDr(S, W, 53);
RNDr(S, W, 54);
RNDr(S, W, 55);
RNDr(S, W, 56);
RNDr(S, W, 57);
RNDr(S, W, 58);
RNDr(S, W, 59);
RNDr(S, W, 60);
RNDr(S, W, 61);
RNDr(S, W, 62);
RNDr(S, W, 63);
for (i = 0; i < 8; i++)
S[i] += midstate[i];
W[18] = S[18];
W[19] = S[19];
W[20] = S[20];
W[22] = S[22];
W[23] = S[23];
W[24] = S[24];
W[30] = S[30];
W[31] = S[31];
memcpy(S + 8, sha256d_hash1 + 8, 32);
S[16] = s1(sha256d_hash1[14]) + sha256d_hash1[ 9] + s0(S[ 1]) + S[ 0];
S[17] = s1(sha256d_hash1[15]) + sha256d_hash1[10] + s0(S[ 2]) + S[ 1];
S[18] = s1(S[16]) + sha256d_hash1[11] + s0(S[ 3]) + S[ 2];
S[19] = s1(S[17]) + sha256d_hash1[12] + s0(S[ 4]) + S[ 3];
S[20] = s1(S[18]) + sha256d_hash1[13] + s0(S[ 5]) + S[ 4];
S[21] = s1(S[19]) + sha256d_hash1[14] + s0(S[ 6]) + S[ 5];
S[22] = s1(S[20]) + sha256d_hash1[15] + s0(S[ 7]) + S[ 6];
S[23] = s1(S[21]) + S[16] + s0(sha256d_hash1[ 8]) + S[ 7];
S[24] = s1(S[22]) + S[17] + s0(sha256d_hash1[ 9]) + sha256d_hash1[ 8];
S[25] = s1(S[23]) + S[18] + s0(sha256d_hash1[10]) + sha256d_hash1[ 9];
S[26] = s1(S[24]) + S[19] + s0(sha256d_hash1[11]) + sha256d_hash1[10];
S[27] = s1(S[25]) + S[20] + s0(sha256d_hash1[12]) + sha256d_hash1[11];
S[28] = s1(S[26]) + S[21] + s0(sha256d_hash1[13]) + sha256d_hash1[12];
S[29] = s1(S[27]) + S[22] + s0(sha256d_hash1[14]) + sha256d_hash1[13];
S[30] = s1(S[28]) + S[23] + s0(sha256d_hash1[15]) + sha256d_hash1[14];
S[31] = s1(S[29]) + S[24] + s0(S[16]) + sha256d_hash1[15];
for (i = 32; i < 60; i += 2) {
S[i] = s1(S[i - 2]) + S[i - 7] + s0(S[i - 15]) + S[i - 16];
S[i+1] = s1(S[i - 1]) + S[i - 6] + s0(S[i - 14]) + S[i - 15];
}
S[60] = s1(S[58]) + S[53] + s0(S[45]) + S[44];
sha256_init(hash);
RNDr(hash, S, 0);
RNDr(hash, S, 1);
RNDr(hash, S, 2);
RNDr(hash, S, 3);
RNDr(hash, S, 4);
RNDr(hash, S, 5);
RNDr(hash, S, 6);
RNDr(hash, S, 7);
RNDr(hash, S, 8);
RNDr(hash, S, 9);
RNDr(hash, S, 10);
RNDr(hash, S, 11);
RNDr(hash, S, 12);
RNDr(hash, S, 13);
RNDr(hash, S, 14);
RNDr(hash, S, 15);
RNDr(hash, S, 16);
RNDr(hash, S, 17);
RNDr(hash, S, 18);
RNDr(hash, S, 19);
RNDr(hash, S, 20);
RNDr(hash, S, 21);
RNDr(hash, S, 22);
RNDr(hash, S, 23);
RNDr(hash, S, 24);
RNDr(hash, S, 25);
RNDr(hash, S, 26);
RNDr(hash, S, 27);
RNDr(hash, S, 28);
RNDr(hash, S, 29);
RNDr(hash, S, 30);
RNDr(hash, S, 31);
RNDr(hash, S, 32);
RNDr(hash, S, 33);
RNDr(hash, S, 34);
RNDr(hash, S, 35);
RNDr(hash, S, 36);
RNDr(hash, S, 37);
RNDr(hash, S, 38);
RNDr(hash, S, 39);
RNDr(hash, S, 40);
RNDr(hash, S, 41);
RNDr(hash, S, 42);
RNDr(hash, S, 43);
RNDr(hash, S, 44);
RNDr(hash, S, 45);
RNDr(hash, S, 46);
RNDr(hash, S, 47);
RNDr(hash, S, 48);
RNDr(hash, S, 49);
RNDr(hash, S, 50);
RNDr(hash, S, 51);
RNDr(hash, S, 52);
RNDr(hash, S, 53);
RNDr(hash, S, 54);
RNDr(hash, S, 55);
RNDr(hash, S, 56);
hash[2] += hash[6] + S1(hash[3]) + Ch(hash[3], hash[4], hash[5])
+ S[57] + sha256_k[57];
hash[1] += hash[5] + S1(hash[2]) + Ch(hash[2], hash[3], hash[4])
+ S[58] + sha256_k[58];
hash[0] += hash[4] + S1(hash[1]) + Ch(hash[1], hash[2], hash[3])
+ S[59] + sha256_k[59];
hash[7] += hash[3] + S1(hash[0]) + Ch(hash[0], hash[1], hash[2])
+ S[60] + sha256_k[60]
+ sha256_h[7];
}
#endif /* EXTERN_SHA256 */
#ifdef HAVE_SHA256_4WAY
void sha256d_ms_4way(uint32_t *hash, uint32_t *data,
const uint32_t *midstate, const uint32_t *prehash);
static inline int scanhash_sha256d_4way(int thr_id, uint32_t *pdata,
const uint32_t *ptarget, uint32_t max_nonce, unsigned long *hashes_done)
{
uint32_t data[4 * 64] __attribute__((aligned(128)));
uint32_t hash[4 * 8] __attribute__((aligned(32)));
uint32_t midstate[4 * 8] __attribute__((aligned(32)));
uint32_t prehash[4 * 8] __attribute__((aligned(32)));
uint32_t n = pdata[19] - 1;
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
int i, j;
memcpy(data, pdata + 16, 64);
sha256d_preextend(data);
for (i = 31; i >= 0; i--)
for (j = 0; j < 4; j++)
data[i * 4 + j] = data[i];
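/* scatter the pre-extended schedule into structure-of-arrays layout:
* each word is replicated across the 4 SIMD lanes */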
sha256_init(midstate);
sha256_transform(midstate, pdata, 0);
memcpy(prehash, midstate, 32);
sha256d_prehash(prehash, pdata + 16);
for (i = 7; i >= 0; i--) {
for (j = 0; j < 4; j++) {
midstate[i * 4 + j] = midstate[i];
prehash[i * 4 + j] = prehash[i];
}
}
do {
for (i = 0; i < 4; i++)
data[4 * 3 + i] = ++n;
sha256d_ms_4way(hash, data, midstate, prehash);
for (i = 0; i < 4; i++) {
if (swab32(hash[4 * 7 + i]) <= Htarg) {
pdata[19] = data[4 * 3 + i];
sha256d_80_swap(hash, pdata);
if (fulltest(hash, ptarget)) {
*hashes_done = n - first_nonce + 1;
return 1;
}
}
}
} while (n < max_nonce && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
pdata[19] = n;
return 0;
}
#endif /* HAVE_SHA256_4WAY */
#ifdef HAVE_SHA256_8WAY
void sha256d_ms_8way(uint32_t *hash, uint32_t *data,
const uint32_t *midstate, const uint32_t *prehash);
static inline int scanhash_sha256d_8way(int thr_id, uint32_t *pdata,
const uint32_t *ptarget, uint32_t max_nonce, unsigned long *hashes_done)
{
uint32_t data[8 * 64] __attribute__((aligned(128)));
uint32_t hash[8 * 8] __attribute__((aligned(32)));
uint32_t midstate[8 * 8] __attribute__((aligned(32)));
uint32_t prehash[8 * 8] __attribute__((aligned(32)));
uint32_t n = pdata[19] - 1;
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
int i, j;
memcpy(data, pdata + 16, 64);
sha256d_preextend(data);
for (i = 31; i >= 0; i--)
for (j = 0; j < 8; j++)
data[i * 8 + j] = data[i];
sha256_init(midstate);
sha256_transform(midstate, pdata, 0);
memcpy(prehash, midstate, 32);
sha256d_prehash(prehash, pdata + 16);
for (i = 7; i >= 0; i--) {
for (j = 0; j < 8; j++) {
midstate[i * 8 + j] = midstate[i];
prehash[i * 8 + j] = prehash[i];
}
}
do {
for (i = 0; i < 8; i++)
data[8 * 3 + i] = ++n;
sha256d_ms_8way(hash, data, midstate, prehash);
for (i = 0; i < 8; i++) {
if (swab32(hash[8 * 7 + i]) <= Htarg) {
pdata[19] = data[8 * 3 + i];
sha256d_80_swap(hash, pdata);
if (fulltest(hash, ptarget)) {
*hashes_done = n - first_nonce + 1;
return 1;
}
}
}
} while (n < max_nonce && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
pdata[19] = n;
return 0;
}
#endif /* HAVE_SHA256_8WAY */
int scanhash_sha256d(int thr_id, uint32_t *pdata, const uint32_t *ptarget,
uint32_t max_nonce, unsigned long *hashes_done)
{
uint32_t data[64] __attribute__((aligned(128)));
uint32_t hash[8] __attribute__((aligned(32)));
uint32_t midstate[8] __attribute__((aligned(32)));
uint32_t prehash[8] __attribute__((aligned(32)));
uint32_t n = pdata[19] - 1;
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
#ifdef HAVE_SHA256_8WAY
if (sha256_use_8way())
return scanhash_sha256d_8way(thr_id, pdata, ptarget,
max_nonce, hashes_done);
#endif
#ifdef HAVE_SHA256_4WAY
if (sha256_use_4way())
return scanhash_sha256d_4way(thr_id, pdata, ptarget,
max_nonce, hashes_done);
#endif
memcpy(data, pdata + 16, 64);
sha256d_preextend(data);
sha256_init(midstate);
sha256_transform(midstate, pdata, 0);
memcpy(prehash, midstate, 32);
sha256d_prehash(prehash, pdata + 16);
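/* data[3] is word 19 of the 80-byte header, i.e. the nonce word */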
do {
data[3] = ++n;
sha256d_ms(hash, data, midstate, prehash);
if (swab32(hash[7]) <= Htarg) {
pdata[19] = data[3];
sha256d_80_swap(hash, pdata);
if (fulltest(hash, ptarget)) {
*hashes_done = n - first_nonce + 1;
return 1;
}
}
} while (n < max_nonce && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
pdata[19] = n;
return 0;
}
| gpl-2.0 |
hramrach/linux-sunxi | drivers/gpu/drm/qxl/qxl_cmd.c | 645 | 18021 | /*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
/* QXL cmd/ring handling */
#include "qxl_drv.h"
#include "qxl_object.h"
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
struct ring {
struct qxl_ring_header header;
uint8_t elements[0];
};
struct qxl_ring {
struct ring *ring;
int element_size;
int n_elements;
int prod_notify;
wait_queue_head_t *push_event;
spinlock_t lock;
};
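/*
* struct ring above overlays memory shared with the qxl device: the
* header carries the producer/consumer indices and notify thresholds,
* followed by the raw elements. struct qxl_ring is the driver-side
* wrapper adding sizing, locking and the wait queue.
*/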
void qxl_ring_free(struct qxl_ring *ring)
{
kfree(ring);
}
void qxl_ring_init_hdr(struct qxl_ring *ring)
{
ring->ring->header.notify_on_prod = ring->n_elements;
}
struct qxl_ring *
qxl_ring_create(struct qxl_ring_header *header,
int element_size,
int n_elements,
int prod_notify,
bool set_prod_notify,
wait_queue_head_t *push_event)
{
struct qxl_ring *ring;
ring = kmalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
return NULL;
ring->ring = (struct ring *)header;
ring->element_size = element_size;
ring->n_elements = n_elements;
ring->prod_notify = prod_notify;
ring->push_event = push_event;
if (set_prod_notify)
qxl_ring_init_hdr(ring);
spin_lock_init(&ring->lock);
return ring;
}
static int qxl_check_header(struct qxl_ring *ring)
{
int ret;
struct qxl_ring_header *header = &(ring->ring->header);
unsigned long flags;
spin_lock_irqsave(&ring->lock, flags);
ret = header->prod - header->cons < header->num_items;
if (ret == 0)
header->notify_on_cons = header->cons + 1;
spin_unlock_irqrestore(&ring->lock, flags);
return ret;
}
int qxl_check_idle(struct qxl_ring *ring)
{
int ret;
struct qxl_ring_header *header = &(ring->ring->header);
unsigned long flags;
spin_lock_irqsave(&ring->lock, flags);
ret = header->prod == header->cons;
spin_unlock_irqrestore(&ring->lock, flags);
return ret;
}
int qxl_ring_push(struct qxl_ring *ring,
const void *new_elt, bool interruptible)
{
struct qxl_ring_header *header = &(ring->ring->header);
uint8_t *elt;
int idx, ret;
unsigned long flags;
spin_lock_irqsave(&ring->lock, flags);
if (header->prod - header->cons == header->num_items) {
header->notify_on_cons = header->cons + 1;
mb();
spin_unlock_irqrestore(&ring->lock, flags);
if (!drm_can_sleep()) {
while (!qxl_check_header(ring))
udelay(1);
} else {
if (interruptible) {
ret = wait_event_interruptible(*ring->push_event,
qxl_check_header(ring));
if (ret)
return ret;
} else {
wait_event(*ring->push_event,
qxl_check_header(ring));
}
}
spin_lock_irqsave(&ring->lock, flags);
}
idx = header->prod & (ring->n_elements - 1);
elt = ring->ring->elements + idx * ring->element_size;
memcpy((void *)elt, new_elt, ring->element_size);
header->prod++;
mb();
if (header->prod == header->notify_on_prod)
outb(0, ring->prod_notify);
spin_unlock_irqrestore(&ring->lock, flags);
return 0;
}
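/*
* Push sequence above: when the ring is full, arm notify_on_cons and
* wait for the device to consume (busy-polling when sleeping is not
* allowed), then copy the element in, advance prod and kick the device
* through the prod_notify I/O port when it asked to be notified at this
* fill level.
*/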
static bool qxl_ring_pop(struct qxl_ring *ring,
void *element)
{
volatile struct qxl_ring_header *header = &(ring->ring->header);
volatile uint8_t *ring_elt;
int idx;
unsigned long flags;
spin_lock_irqsave(&ring->lock, flags);
if (header->cons == header->prod) {
header->notify_on_prod = header->cons + 1;
spin_unlock_irqrestore(&ring->lock, flags);
return false;
}
idx = header->cons & (ring->n_elements - 1);
ring_elt = ring->ring->elements + idx * ring->element_size;
memcpy(element, (void *)ring_elt, ring->element_size);
header->cons++;
spin_unlock_irqrestore(&ring->lock, flags);
return true;
}
int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
uint32_t type, bool interruptible)
{
struct qxl_command cmd;
struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
cmd.type = type;
cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}
int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
uint32_t type, bool interruptible)
{
struct qxl_command cmd;
struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
cmd.type = type;
cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}
bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
if (!qxl_check_idle(qdev->release_ring)) {
queue_work(qdev->gc_queue, &qdev->gc_work);
if (flush)
flush_work(&qdev->gc_work);
return true;
}
return false;
}
int qxl_garbage_collect(struct qxl_device *qdev)
{
struct qxl_release *release;
uint64_t id, next_id;
int i = 0;
union qxl_release_info *info;
while (qxl_ring_pop(qdev->release_ring, &id)) {
QXL_INFO(qdev, "popped %lld\n", id);
while (id) {
release = qxl_release_from_id_locked(qdev, id);
if (release == NULL)
break;
info = qxl_release_map(qdev, release);
next_id = info->next;
qxl_release_unmap(qdev, release, info);
QXL_INFO(qdev, "popped %lld, next %lld\n", id,
next_id);
switch (release->type) {
case QXL_RELEASE_DRAWABLE:
case QXL_RELEASE_SURFACE_CMD:
case QXL_RELEASE_CURSOR_CMD:
break;
default:
DRM_ERROR("unexpected release type\n");
break;
}
id = next_id;
qxl_release_free(qdev, release);
++i;
}
}
QXL_INFO(qdev, "%s: %d\n", __func__, i);
return i;
}
int qxl_alloc_bo_reserved(struct qxl_device *qdev,
struct qxl_release *release,
unsigned long size,
struct qxl_bo **_bo)
{
struct qxl_bo *bo;
int ret;
ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
if (ret) {
DRM_ERROR("failed to allocate VRAM BO\n");
return ret;
}
ret = qxl_release_list_add(release, bo);
if (ret)
goto out_unref;
*_bo = bo;
return 0;
out_unref:
qxl_bo_unref(&bo);
return ret;
}
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
{
int irq_num;
long addr = qdev->io_base + port;
int ret;
mutex_lock(&qdev->async_io_mutex);
irq_num = atomic_read(&qdev->irq_received_io_cmd);
if (qdev->last_sent_io_cmd > irq_num) {
if (intr)
ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
else
ret = wait_event_timeout(qdev->io_cmd_event,
atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
/* 0 means timeout; just bail, the "hw" has gone away */
if (ret <= 0)
goto out;
irq_num = atomic_read(&qdev->irq_received_io_cmd);
}
outb(val, addr);
qdev->last_sent_io_cmd = irq_num + 1;
if (intr)
ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
else
ret = wait_event_timeout(qdev->io_cmd_event,
atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
out:
if (ret > 0)
ret = 0;
mutex_unlock(&qdev->async_io_mutex);
return ret;
}
static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
{
int ret;
restart:
ret = wait_for_io_cmd_user(qdev, val, port, false);
if (ret == -ERESTARTSYS)
goto restart;
}
int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
const struct qxl_rect *area)
{
int surface_id;
uint32_t surface_width, surface_height;
int ret;
if (!surf->hw_surf_alloc)
DRM_ERROR("got io update area with no hw surface\n");
if (surf->is_primary)
surface_id = 0;
else
surface_id = surf->surface_id;
surface_width = surf->surf.width;
surface_height = surf->surf.height;
if (area->left < 0 || area->top < 0 ||
area->right > surface_width || area->bottom > surface_height) {
qxl_io_log(qdev, "%s: not doing area update for "
"%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left,
area->top, area->right, area->bottom, surface_width, surface_height);
return -EINVAL;
}
mutex_lock(&qdev->update_area_mutex);
qdev->ram_header->update_area = *area;
qdev->ram_header->update_surface = surface_id;
ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
mutex_unlock(&qdev->update_area_mutex);
return ret;
}
void qxl_io_notify_oom(struct qxl_device *qdev)
{
outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
}
void qxl_io_flush_release(struct qxl_device *qdev)
{
outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
}
void qxl_io_flush_surfaces(struct qxl_device *qdev)
{
wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
}
void qxl_io_destroy_primary(struct qxl_device *qdev)
{
wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
}
void qxl_io_create_primary(struct qxl_device *qdev,
unsigned offset, struct qxl_bo *bo)
{
struct qxl_surface_create *create;
QXL_INFO(qdev, "%s: qdev %p, ram_header %p\n", __func__, qdev,
qdev->ram_header);
create = &qdev->ram_header->create_surface;
create->format = bo->surf.format;
create->width = bo->surf.width;
create->height = bo->surf.height;
create->stride = bo->surf.stride;
create->mem = qxl_bo_physical_address(qdev, bo, offset);
QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem,
bo->kptr);
create->flags = QXL_SURF_FLAG_KEEP_DATA;
create->type = QXL_SURF_TYPE_PRIMARY;
wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
}
void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
{
QXL_INFO(qdev, "qxl_memslot_add %d\n", id);
wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}
void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args);
va_end(args);
/*
* Do not use DRM logging here - it would call printk, which would
* call back into qxl for rendering (qxl_fb)
*/
outb(0, qdev->io_base + QXL_IO_LOG);
}
void qxl_io_reset(struct qxl_device *qdev)
{
outb(0, qdev->io_base + QXL_IO_RESET);
}
void qxl_io_monitors_config(struct qxl_device *qdev)
{
qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__,
qdev->monitors_config ?
qdev->monitors_config->count : -1,
qdev->monitors_config && qdev->monitors_config->count ?
qdev->monitors_config->heads[0].width : -1,
qdev->monitors_config && qdev->monitors_config->count ?
qdev->monitors_config->heads[0].height : -1,
qdev->monitors_config && qdev->monitors_config->count ?
qdev->monitors_config->heads[0].x : -1,
qdev->monitors_config && qdev->monitors_config->count ?
qdev->monitors_config->heads[0].y : -1
);
wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
}
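/*
 * Allocate a surface id from the idr. Id 0 is reserved for the primary
 * surface, so allocation starts at 1; if the device's surface table is
 * exhausted we drop the handle, reap up to two idle surfaces and retry.
 */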
int qxl_surface_id_alloc(struct qxl_device *qdev,
struct qxl_bo *surf)
{
uint32_t handle;
int idr_ret;
int count = 0;
again:
idr_preload(GFP_ATOMIC);
spin_lock(&qdev->surf_id_idr_lock);
idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
spin_unlock(&qdev->surf_id_idr_lock);
idr_preload_end();
if (idr_ret < 0)
return idr_ret;
handle = idr_ret;
if (handle >= qdev->rom->n_surfaces) {
count++;
spin_lock(&qdev->surf_id_idr_lock);
idr_remove(&qdev->surf_id_idr, handle);
spin_unlock(&qdev->surf_id_idr_lock);
qxl_reap_surface_id(qdev, 2);
goto again;
}
surf->surface_id = handle;
spin_lock(&qdev->surf_id_idr_lock);
qdev->last_alloced_surf_id = handle;
spin_unlock(&qdev->surf_id_idr_lock);
return 0;
}
void qxl_surface_id_dealloc(struct qxl_device *qdev,
uint32_t surface_id)
{
spin_lock(&qdev->surf_id_idr_lock);
idr_remove(&qdev->surf_id_idr, surface_id);
spin_unlock(&qdev->surf_id_idr_lock);
}
int qxl_hw_surface_alloc(struct qxl_device *qdev,
struct qxl_bo *surf,
struct ttm_mem_reg *new_mem)
{
struct qxl_surface_cmd *cmd;
struct qxl_release *release;
int ret;
if (surf->hw_surf_alloc)
return 0;
ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
NULL,
&release);
if (ret)
return ret;
ret = qxl_release_reserve_list(release, true);
if (ret)
return ret;
cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_SURFACE_CMD_CREATE;
cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
cmd->u.surface_create.format = surf->surf.format;
cmd->u.surface_create.width = surf->surf.width;
cmd->u.surface_create.height = surf->surf.height;
cmd->u.surface_create.stride = surf->surf.stride;
if (new_mem) {
int slot_id = surf->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);
/* TODO - need to hold one of the locks to read tbo.offset */
cmd->u.surface_create.data = slot->high_bits;
cmd->u.surface_create.data |= (new_mem->start << PAGE_SHIFT) + surf->tbo.bdev->man[new_mem->mem_type].gpu_offset;
} else
cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
cmd->surface_id = surf->surface_id;
qxl_release_unmap(qdev, release, &cmd->release_info);
surf->surf_create = release;
/* no need to add a release to the fence for this surface bo,
since it is only released when we ask to destroy the surface
and it would never signal otherwise */
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
qxl_release_fence_buffer_objects(release);
surf->hw_surf_alloc = true;
spin_lock(&qdev->surf_id_idr_lock);
idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
spin_unlock(&qdev->surf_id_idr_lock);
return 0;
}
int qxl_hw_surface_dealloc(struct qxl_device *qdev,
struct qxl_bo *surf)
{
struct qxl_surface_cmd *cmd;
struct qxl_release *release;
int ret;
int id;
if (!surf->hw_surf_alloc)
return 0;
ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
surf->surf_create,
&release);
if (ret)
return ret;
surf->surf_create = NULL;
/* remove the surface from the idr, but not the surface id yet */
spin_lock(&qdev->surf_id_idr_lock);
idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
spin_unlock(&qdev->surf_id_idr_lock);
surf->hw_surf_alloc = false;
id = surf->surface_id;
surf->surface_id = 0;
release->surface_release_id = id;
cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_SURFACE_CMD_DESTROY;
cmd->surface_id = id;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
qxl_release_fence_buffer_objects(release);
return 0;
}
int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
struct qxl_rect rect;
int ret;
/* if we are evicting, we need to make sure the surface is up
to date */
rect.left = 0;
rect.right = surf->surf.width;
rect.top = 0;
rect.bottom = surf->surf.height;
retry:
ret = qxl_io_update_area(qdev, surf, &rect);
if (ret == -ERESTARTSYS)
goto retry;
return ret;
}
static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
/* no need to update area if we are just freeing the surface normally */
if (do_update_area)
qxl_update_surface(qdev, surf);
/* nuke the surface id at the hw */
qxl_hw_surface_dealloc(qdev, surf);
}
void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
mutex_lock(&qdev->surf_evict_mutex);
qxl_surface_evict_locked(qdev, surf, do_update_area);
mutex_unlock(&qdev->surf_evict_mutex);
}
static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
int ret;
ret = qxl_bo_reserve(surf, false);
if (ret)
return ret;
if (stall)
mutex_unlock(&qdev->surf_evict_mutex);
ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
if (stall)
mutex_lock(&qdev->surf_evict_mutex);
if (ret) {
qxl_bo_unreserve(surf);
return ret;
}
qxl_surface_evict_locked(qdev, surf, true);
qxl_bo_unreserve(surf);
return 0;
}
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
{
int num_reaped = 0;
int i, ret;
bool stall = false;
int start = 0;
mutex_lock(&qdev->surf_evict_mutex);
again:
spin_lock(&qdev->surf_id_idr_lock);
start = qdev->last_alloced_surf_id + 1;
spin_unlock(&qdev->surf_id_idr_lock);
for (i = start; i < start + qdev->rom->n_surfaces; i++) {
void *objptr;
int surfid = i % qdev->rom->n_surfaces;
/* this avoids the case where the object is in the
idr but has been evicted half way - this makes
the idr lookup atomic with the eviction */
spin_lock(&qdev->surf_id_idr_lock);
objptr = idr_find(&qdev->surf_id_idr, surfid);
spin_unlock(&qdev->surf_id_idr_lock);
if (!objptr)
continue;
ret = qxl_reap_surf(qdev, objptr, stall);
if (ret == 0)
num_reaped++;
if (num_reaped >= max_to_reap)
break;
}
if (num_reaped == 0 && !stall) {
stall = true;
goto again;
}
mutex_unlock(&qdev->surf_evict_mutex);
if (num_reaped) {
usleep_range(500, 1000);
qxl_queue_garbage_collect(qdev, true);
}
return 0;
}
| gpl-2.0 |
DaemonGG/LARP_kernel3.16.0 | arch/mips/mti-malta/malta-setup.c | 901 | 7797 | /*
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
* Copyright (C) 2008 Dmitri Vorobiev
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/screen_info.h>
#include <linux/time.h>
#include <asm/fw/fw.h>
#include <asm/mips-cm.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/malta.h>
#include <asm/mips-boards/maltaint.h>
#include <asm/dma.h>
#include <asm/traps.h>
#ifdef CONFIG_VT
#include <linux/console.h>
#endif
extern void malta_be_init(void);
extern int malta_be_handler(struct pt_regs *regs, int is_fixup);
static struct resource standard_io_resources[] = {
{
.name = "dma1",
.start = 0x00,
.end = 0x1f,
.flags = IORESOURCE_BUSY
},
{
.name = "timer",
.start = 0x40,
.end = 0x5f,
.flags = IORESOURCE_BUSY
},
{
.name = "keyboard",
.start = 0x60,
.end = 0x6f,
.flags = IORESOURCE_BUSY
},
{
.name = "dma page reg",
.start = 0x80,
.end = 0x8f,
.flags = IORESOURCE_BUSY
},
{
.name = "dma2",
.start = 0xc0,
.end = 0xdf,
.flags = IORESOURCE_BUSY
},
};
const char *get_system_type(void)
{
return "MIPS Malta";
}
const char display_string[] = " LINUX ON MALTA ";
#ifdef CONFIG_BLK_DEV_FD
static void __init fd_activate(void)
{
/*
* Activate Floppy Controller in the SMSC FDC37M817 Super I/O
* Controller.
* Done by YAMON 2.00 onwards
*/
/* Entering config state. */
SMSC_WRITE(SMSC_CONFIG_ENTER, SMSC_CONFIG_REG);
/* Activate floppy controller. */
SMSC_WRITE(SMSC_CONFIG_DEVNUM, SMSC_CONFIG_REG);
SMSC_WRITE(SMSC_CONFIG_DEVNUM_FLOPPY, SMSC_DATA_REG);
SMSC_WRITE(SMSC_CONFIG_ACTIVATE, SMSC_CONFIG_REG);
SMSC_WRITE(SMSC_CONFIG_ACTIVATE_ENABLE, SMSC_DATA_REG);
/* Exit config state. */
SMSC_WRITE(SMSC_CONFIG_EXIT, SMSC_CONFIG_REG);
}
#endif
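/*
 * Probe for hardware I/O coherency: either the Bonito system controller
 * with CPU coherency present, or a Coherence Manager IOCU. Returns
 * non-zero (and sets hw_coherentio) when coherent DMA can be used.
 */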
static int __init plat_enable_iocoherency(void)
{
int supported = 0;
if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
pr_info("Enabled Bonito CPU coherency\n");
supported = 1;
}
if (strstr(fw_getcmdline(), "iobcuncached")) {
BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
pr_info("Disabled Bonito IOBC coherency\n");
} else {
BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
BONITO_PCIMEMBASECFG |=
(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
pr_info("Enabled Bonito IOBC coherency\n");
}
} else if (mips_cm_numiocu() != 0) {
/* Nothing special needs to be done to enable coherency */
pr_info("CMP IOCU detected\n");
if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) {
pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
return 0;
}
supported = 1;
}
hw_coherentio = supported;
return supported;
}
static void __init plat_setup_iocoherency(void)
{
#ifdef CONFIG_DMA_NONCOHERENT
/*
* Kernel has been configured with software coherency
* but we might choose to turn it off and use hardware
* coherency instead.
*/
if (plat_enable_iocoherency()) {
if (coherentio == 0)
pr_info("Hardware DMA cache coherency disabled\n");
else
pr_info("Hardware DMA cache coherency enabled\n");
} else {
if (coherentio == 1)
pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n");
else
pr_info("Software DMA cache coherency enabled\n");
}
#else
if (!plat_enable_iocoherency())
panic("Hardware DMA cache coherency not supported!");
#endif
}
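/*
 * Read the PCI clock jumper and warn when the bus runs at a non-standard
 * speed; unless the user already passed pci_clock=, append a matching
 * option to the command line, e.g. " pci_clock=25" for a 25 MHz bus.
 */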
static void __init pci_clock_check(void)
{
unsigned int __iomem *jmpr_p =
(unsigned int *) ioremap(MALTA_JMPRS_REG, sizeof(unsigned int));
int jmpr = (__raw_readl(jmpr_p) >> 2) & 0x07;
static const int pciclocks[] __initconst = {
33, 20, 25, 30, 12, 16, 37, 10
};
int pciclock = pciclocks[jmpr];
char *optptr, *argptr = fw_getcmdline();
/*
* If user passed a pci_clock= option, don't tack on another one
*/
optptr = strstr(argptr, "pci_clock=");
if (optptr && (optptr == argptr || optptr[-1] == ' '))
return;
if (pciclock != 33) {
pr_warn("WARNING: PCI clock is %dMHz, setting pci_clock\n",
pciclock);
argptr += strlen(argptr);
sprintf(argptr, " pci_clock=%d", pciclock);
if (pciclock < 20 || pciclock > 66)
pr_warn("WARNING: IDE timing calculations will be "
"incorrect\n");
}
}
#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
static void __init screen_info_setup(void)
{
screen_info = (struct screen_info) {
.orig_x = 0,
.orig_y = 25,
.ext_mem_k = 0,
.orig_video_page = 0,
.orig_video_mode = 0,
.orig_video_cols = 80,
.unused2 = 0,
.orig_video_ega_bx = 0,
.unused3 = 0,
.orig_video_lines = 25,
.orig_video_isVGA = VIDEO_TYPE_VGAC,
.orig_video_points = 16
};
}
#endif
static void __init bonito_quirks_setup(void)
{
char *argptr;
argptr = fw_getcmdline();
if (strstr(argptr, "debug")) {
BONITO_BONGENCFG |= BONITO_BONGENCFG_DEBUGMODE;
pr_info("Enabled Bonito debug mode\n");
} else
BONITO_BONGENCFG &= ~BONITO_BONGENCFG_DEBUGMODE;
#ifdef CONFIG_DMA_COHERENT
if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
pr_info("Enabled Bonito CPU coherency\n");
argptr = fw_getcmdline();
if (strstr(argptr, "iobcuncached")) {
BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
pr_info("Disabled Bonito IOBC coherency\n");
} else {
BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
BONITO_PCIMEMBASECFG |=
(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
pr_info("Enabled Bonito IOBC coherency\n");
}
} else
panic("Hardware DMA cache coherency not supported");
#endif
}
void __init plat_mem_setup(void)
{
unsigned int i;
if (config_enabled(CONFIG_EVA))
/* EVA has already been configured in mach-malta/kernel-init.h */
pr_info("Enhanced Virtual Addressing (EVA) activated\n");
mips_pcibios_init();
/* Request I/O space for devices used on the Malta board. */
for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
request_resource(&ioport_resource, standard_io_resources+i);
/*
* Enable DMA channel 4 (cascade channel) in the PIIX4 south bridge.
*/
enable_dma(4);
#ifdef CONFIG_DMA_COHERENT
if (mips_revision_sconid != MIPS_REVISION_SCON_BONITO)
panic("Hardware DMA cache coherency not supported");
#endif
if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO)
bonito_quirks_setup();
plat_setup_iocoherency();
pci_clock_check();
#ifdef CONFIG_BLK_DEV_FD
fd_activate();
#endif
#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
screen_info_setup();
#endif
board_be_init = malta_be_init;
board_be_handler = malta_be_handler;
}
| gpl-2.0 |
Sohamlad7/kernel | drivers/cpufreq/cpufreq-cpu0.c | 1157 | 7619 | /*
* Copyright (C) 2012 Freescale Semiconductor, Inc.
*
* The OPP code in function cpu0_set_target() is reused from
* drivers/cpufreq/omap-cpufreq.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
static unsigned int transition_latency;
static unsigned int voltage_tolerance; /* in percentage */
static struct device *cpu_dev;
static struct clk *cpu_clk;
static struct regulator *cpu_reg;
static struct cpufreq_frequency_table *freq_table;
static int cpu0_verify_speed(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, freq_table);
}
static unsigned int cpu0_get_speed(unsigned int cpu)
{
return clk_get_rate(cpu_clk) / 1000;
}
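/*
 * DVFS transition: look up the target OPP, then order the operations so
 * the voltage always satisfies the higher of the two frequencies - raise
 * the voltage before raising the clock, and lower it only after the
 * clock has been lowered. On clk_set_rate() failure the old voltage is
 * restored.
 */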
static int cpu0_set_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
struct cpufreq_freqs freqs;
struct opp *opp;
unsigned long volt = 0, volt_old = 0, tol = 0;
long freq_Hz, freq_exact;
unsigned int index;
int ret;
ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
relation, &index);
if (ret) {
pr_err("failed to match target freqency %d: %d\n",
target_freq, ret);
return ret;
}
freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
if (freq_Hz < 0)
freq_Hz = freq_table[index].frequency * 1000;
freq_exact = freq_Hz;
freqs.new = freq_Hz / 1000;
freqs.old = clk_get_rate(cpu_clk) / 1000;
if (freqs.old == freqs.new)
return 0;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
if (cpu_reg) {
rcu_read_lock();
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
if (IS_ERR(opp)) {
rcu_read_unlock();
pr_err("failed to find OPP for %ld\n", freq_Hz);
freqs.new = freqs.old;
ret = PTR_ERR(opp);
goto post_notify;
}
volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
tol = volt * voltage_tolerance / 100;
volt_old = regulator_get_voltage(cpu_reg);
}
pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n",
freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
freqs.new / 1000, volt ? volt / 1000 : -1);
/* scaling up? scale voltage before frequency */
if (cpu_reg && freqs.new > freqs.old) {
ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
if (ret) {
pr_err("failed to scale voltage up: %d\n", ret);
freqs.new = freqs.old;
goto post_notify;
}
}
ret = clk_set_rate(cpu_clk, freq_exact);
if (ret) {
pr_err("failed to set clock rate: %d\n", ret);
if (cpu_reg)
regulator_set_voltage_tol(cpu_reg, volt_old, tol);
freqs.new = freqs.old;
goto post_notify;
}
/* scaling down? scale voltage after frequency */
if (cpu_reg && freqs.new < freqs.old) {
ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
if (ret) {
pr_err("failed to scale voltage down: %d\n", ret);
clk_set_rate(cpu_clk, freqs.old * 1000);
freqs.new = freqs.old;
}
}
post_notify:
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return ret;
}
static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
if (ret) {
pr_err("invalid frequency table: %d\n", ret);
return ret;
}
policy->cpuinfo.transition_latency = transition_latency;
policy->cur = clk_get_rate(cpu_clk) / 1000;
/*
* The driver only supports the SMP configuration where all processors
* share the clock and voltage. Use the cpufreq affected_cpus
* interface to have all CPUs scaled together.
*/
cpumask_setall(policy->cpus);
cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
return 0;
}
static int cpu0_cpufreq_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
static struct freq_attr *cpu0_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver cpu0_cpufreq_driver = {
.flags = CPUFREQ_STICKY,
.verify = cpu0_verify_speed,
.target = cpu0_set_target,
.get = cpu0_get_speed,
.init = cpu0_cpufreq_init,
.exit = cpu0_cpufreq_exit,
.name = "generic_cpu0",
.attr = cpu0_cpufreq_attr,
};
static int cpu0_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np, *parent;
int ret;
parent = of_find_node_by_path("/cpus");
if (!parent) {
pr_err("failed to find OF /cpus\n");
return -ENOENT;
}
for_each_child_of_node(parent, np) {
if (of_get_property(np, "operating-points", NULL))
break;
}
if (!np) {
pr_err("failed to find cpu0 node\n");
ret = -ENOENT;
goto out_put_parent;
}
cpu_dev = &pdev->dev;
cpu_dev->of_node = np;
cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
if (IS_ERR(cpu_reg)) {
/*
* If cpu0 regulator supply node is present, but regulator is
* not yet registered, we should try deferring probe.
*/
if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
dev_err(cpu_dev, "cpu0 regulator not ready, retry\n");
ret = -EPROBE_DEFER;
goto out_put_node;
}
pr_warn("failed to get cpu0 regulator: %ld\n",
PTR_ERR(cpu_reg));
cpu_reg = NULL;
}
cpu_clk = devm_clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
ret = PTR_ERR(cpu_clk);
pr_err("failed to get cpu0 clock: %d\n", ret);
goto out_put_node;
}
ret = of_init_opp_table(cpu_dev);
if (ret) {
pr_err("failed to init OPP table: %d\n", ret);
goto out_put_node;
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
pr_err("failed to init cpufreq table: %d\n", ret);
goto out_put_node;
}
of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
if (of_property_read_u32(np, "clock-latency", &transition_latency))
transition_latency = CPUFREQ_ETERNAL;
if (cpu_reg) {
struct opp *opp;
unsigned long min_uV, max_uV;
int i;
/*
* OPP is maintained in order of increasing frequency, and
* freq_table initialised from OPP is therefore sorted in the
* same order.
*/
for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
;
rcu_read_lock();
opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[0].frequency * 1000, true);
min_uV = dev_pm_opp_get_voltage(opp);
opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[i-1].frequency * 1000, true);
max_uV = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
if (ret > 0)
transition_latency += ret * 1000;
}
ret = cpufreq_register_driver(&cpu0_cpufreq_driver);
if (ret) {
pr_err("failed register driver: %d\n", ret);
goto out_free_table;
}
of_node_put(np);
of_node_put(parent);
return 0;
out_free_table:
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_put_node:
of_node_put(np);
out_put_parent:
of_node_put(parent);
return ret;
}
static int cpu0_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&cpu0_cpufreq_driver);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
return 0;
}
static struct platform_driver cpu0_cpufreq_platdrv = {
.driver = {
.name = "cpufreq-cpu0",
.owner = THIS_MODULE,
},
.probe = cpu0_cpufreq_probe,
.remove = cpu0_cpufreq_remove,
};
module_platform_driver(cpu0_cpufreq_platdrv);
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic CPU0 cpufreq driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
bedwa/VaudeVille | drivers/iommu/amd_iommu_v2.c | 1925 | 23151 | /*
* Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include "amd_iommu_types.h"
#include "amd_iommu_proto.h"
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>");
#define MAX_DEVICES 0x10000
#define PRI_QUEUE_SIZE 512
struct pri_queue {
atomic_t inflight;
bool finish;
int status;
};
struct pasid_state {
struct list_head list; /* For global state-list */
atomic_t count; /* Reference count */
struct task_struct *task; /* Task bound to this PASID */
struct mm_struct *mm; /* mm_struct for the faults */
struct mmu_notifier mn; /* mmu_notifier handle */
struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */
struct device_state *device_state; /* Link to our device_state */
int pasid; /* PASID index */
spinlock_t lock; /* Protect pri_queues */
wait_queue_head_t wq; /* To wait for count == 0 */
};
struct device_state {
atomic_t count;
struct pci_dev *pdev;
struct pasid_state **states;
struct iommu_domain *domain;
int pasid_levels;
int max_pasids;
amd_iommu_invalid_ppr_cb inv_ppr_cb;
amd_iommu_invalidate_ctx inv_ctx_cb;
spinlock_t lock;
wait_queue_head_t wq;
};
struct fault {
struct work_struct work;
struct device_state *dev_state;
struct pasid_state *state;
struct mm_struct *mm;
u64 address;
u16 devid;
u16 pasid;
u16 tag;
u16 finish;
u16 flags;
};
struct device_state **state_table;
static spinlock_t state_lock;
/* List and lock for all pasid_states */
static LIST_HEAD(pasid_state_list);
static DEFINE_SPINLOCK(ps_lock);
static struct workqueue_struct *iommu_wq;
/*
* Empty page table - Used between
* mmu_notifier_invalidate_range_start and
* mmu_notifier_invalidate_range_end
*/
static u64 *empty_page_table;
static void free_pasid_states(struct device_state *dev_state);
static void unbind_pasid(struct device_state *dev_state, int pasid);
static int task_exit(struct notifier_block *nb, unsigned long e, void *data);
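/*
 * Pack the PCI bus number and devfn into the 16-bit requester id
 * (bus << 8 | devfn) used to index state_table; e.g. bus 0x01,
 * devfn 0x10 yields devid 0x0110.
 */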
static u16 device_id(struct pci_dev *pdev)
{
u16 devid;
devid = pdev->bus->number;
devid = (devid << 8) | pdev->devfn;
return devid;
}
static struct device_state *get_device_state(u16 devid)
{
struct device_state *dev_state;
unsigned long flags;
spin_lock_irqsave(&state_lock, flags);
dev_state = state_table[devid];
if (dev_state != NULL)
atomic_inc(&dev_state->count);
spin_unlock_irqrestore(&state_lock, flags);
return dev_state;
}
static void free_device_state(struct device_state *dev_state)
{
/*
* First detach device from domain - No more PRI requests will arrive
* from that device after it is unbound from the IOMMUv2 domain.
*/
iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);
/* Everything is down now, free the IOMMUv2 domain */
iommu_domain_free(dev_state->domain);
/* Finally get rid of the device-state */
kfree(dev_state);
}
static void put_device_state(struct device_state *dev_state)
{
if (atomic_dec_and_test(&dev_state->count))
wake_up(&dev_state->wq);
}
static void put_device_state_wait(struct device_state *dev_state)
{
DEFINE_WAIT(wait);
prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
if (!atomic_dec_and_test(&dev_state->count))
schedule();
finish_wait(&dev_state->wq, &wait);
free_device_state(dev_state);
}
static struct notifier_block profile_nb = {
.notifier_call = task_exit,
};
static void link_pasid_state(struct pasid_state *pasid_state)
{
spin_lock(&ps_lock);
list_add_tail(&pasid_state->list, &pasid_state_list);
spin_unlock(&ps_lock);
}
static void __unlink_pasid_state(struct pasid_state *pasid_state)
{
list_del(&pasid_state->list);
}
static void unlink_pasid_state(struct pasid_state *pasid_state)
{
spin_lock(&ps_lock);
__unlink_pasid_state(pasid_state);
spin_unlock(&ps_lock);
}
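/*
 * The per-device PASID table is a small radix tree with 512 (2^9)
 * entries per level; each level consumes 9 bits of the PASID. For
 * example, with pasid_levels == 1, pasid 0x345 walks root[0x1] and
 * then leaf[0x145].
 */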
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
int pasid, bool alloc)
{
struct pasid_state **root, **ptr;
int level, index;
level = dev_state->pasid_levels;
root = dev_state->states;
while (true) {
index = (pasid >> (9 * level)) & 0x1ff;
ptr = &root[index];
if (level == 0)
break;
if (*ptr == NULL) {
if (!alloc)
return NULL;
*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
if (*ptr == NULL)
return NULL;
}
root = (struct pasid_state **)*ptr;
level -= 1;
}
return ptr;
}
static int set_pasid_state(struct device_state *dev_state,
struct pasid_state *pasid_state,
int pasid)
{
struct pasid_state **ptr;
unsigned long flags;
int ret;
spin_lock_irqsave(&dev_state->lock, flags);
ptr = __get_pasid_state_ptr(dev_state, pasid, true);
ret = -ENOMEM;
if (ptr == NULL)
goto out_unlock;
ret = -ENOMEM;
if (*ptr != NULL)
goto out_unlock;
*ptr = pasid_state;
ret = 0;
out_unlock:
spin_unlock_irqrestore(&dev_state->lock, flags);
return ret;
}
static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
struct pasid_state **ptr;
unsigned long flags;
spin_lock_irqsave(&dev_state->lock, flags);
ptr = __get_pasid_state_ptr(dev_state, pasid, true);
if (ptr == NULL)
goto out_unlock;
*ptr = NULL;
out_unlock:
spin_unlock_irqrestore(&dev_state->lock, flags);
}
static struct pasid_state *get_pasid_state(struct device_state *dev_state,
int pasid)
{
struct pasid_state **ptr, *ret = NULL;
unsigned long flags;
spin_lock_irqsave(&dev_state->lock, flags);
ptr = __get_pasid_state_ptr(dev_state, pasid, false);
if (ptr == NULL)
goto out_unlock;
ret = *ptr;
if (ret)
atomic_inc(&ret->count);
out_unlock:
spin_unlock_irqrestore(&dev_state->lock, flags);
return ret;
}
static void free_pasid_state(struct pasid_state *pasid_state)
{
kfree(pasid_state);
}
static void put_pasid_state(struct pasid_state *pasid_state)
{
if (atomic_dec_and_test(&pasid_state->count)) {
put_device_state(pasid_state->device_state);
wake_up(&pasid_state->wq);
}
}
static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
DEFINE_WAIT(wait);
prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
if (atomic_dec_and_test(&pasid_state->count))
put_device_state(pasid_state->device_state);
else
schedule();
finish_wait(&pasid_state->wq, &wait);
mmput(pasid_state->mm);
free_pasid_state(pasid_state);
}
static void __unbind_pasid(struct pasid_state *pasid_state)
{
struct iommu_domain *domain;
domain = pasid_state->device_state->domain;
amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);
clear_pasid_state(pasid_state->device_state, pasid_state->pasid);
/* Make sure no more pending faults are in the queue */
flush_workqueue(iommu_wq);
mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
put_pasid_state(pasid_state); /* Reference taken in bind() function */
}
static void unbind_pasid(struct device_state *dev_state, int pasid)
{
struct pasid_state *pasid_state;
pasid_state = get_pasid_state(dev_state, pasid);
if (pasid_state == NULL)
return;
unlink_pasid_state(pasid_state);
__unbind_pasid(pasid_state);
put_pasid_state_wait(pasid_state); /* Reference taken in this function */
}
static void free_pasid_states_level1(struct pasid_state **tbl)
{
int i;
for (i = 0; i < 512; ++i) {
if (tbl[i] == NULL)
continue;
free_page((unsigned long)tbl[i]);
}
}
static void free_pasid_states_level2(struct pasid_state **tbl)
{
struct pasid_state **ptr;
int i;
for (i = 0; i < 512; ++i) {
if (tbl[i] == NULL)
continue;
ptr = (struct pasid_state **)tbl[i];
free_pasid_states_level1(ptr);
}
}
static void free_pasid_states(struct device_state *dev_state)
{
struct pasid_state *pasid_state;
int i;
for (i = 0; i < dev_state->max_pasids; ++i) {
pasid_state = get_pasid_state(dev_state, i);
if (pasid_state == NULL)
continue;
put_pasid_state(pasid_state);
unbind_pasid(dev_state, i);
}
if (dev_state->pasid_levels == 2)
free_pasid_states_level2(dev_state->states);
else if (dev_state->pasid_levels == 1)
free_pasid_states_level1(dev_state->states);
else if (dev_state->pasid_levels != 0)
BUG();
free_page((unsigned long)dev_state->states);
}
static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
return container_of(mn, struct pasid_state, mn);
}
static void __mn_flush_page(struct mmu_notifier *mn,
unsigned long address)
{
struct pasid_state *pasid_state;
struct device_state *dev_state;
pasid_state = mn_to_state(mn);
dev_state = pasid_state->device_state;
amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
}
static int mn_clear_flush_young(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address)
{
__mn_flush_page(mn, address);
return 0;
}
static void mn_change_pte(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address,
pte_t pte)
{
__mn_flush_page(mn, address);
}
static void mn_invalidate_page(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address)
{
__mn_flush_page(mn, address);
}
static void mn_invalidate_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end)
{
struct pasid_state *pasid_state;
struct device_state *dev_state;
pasid_state = mn_to_state(mn);
dev_state = pasid_state->device_state;
amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
__pa(empty_page_table));
}
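/*
 * While the invalidation is in flight the GCR3 points at the zeroed
 * empty_page_table installed above, so the device cannot use stale
 * translations; range_end below switches back to the real page tables.
 */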
static void mn_invalidate_range_end(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end)
{
struct pasid_state *pasid_state;
struct device_state *dev_state;
pasid_state = mn_to_state(mn);
dev_state = pasid_state->device_state;
amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
__pa(pasid_state->mm->pgd));
}
static struct mmu_notifier_ops iommu_mn = {
.clear_flush_young = mn_clear_flush_young,
.change_pte = mn_change_pte,
.invalidate_page = mn_invalidate_page,
.invalidate_range_start = mn_invalidate_range_start,
.invalidate_range_end = mn_invalidate_range_end,
};
static void set_pri_tag_status(struct pasid_state *pasid_state,
u16 tag, int status)
{
unsigned long flags;
spin_lock_irqsave(&pasid_state->lock, flags);
pasid_state->pri[tag].status = status;
spin_unlock_irqrestore(&pasid_state->lock, flags);
}
static void finish_pri_tag(struct device_state *dev_state,
struct pasid_state *pasid_state,
u16 tag)
{
unsigned long flags;
spin_lock_irqsave(&pasid_state->lock, flags);
if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
pasid_state->pri[tag].finish) {
amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
pasid_state->pri[tag].status, tag);
pasid_state->pri[tag].finish = false;
pasid_state->pri[tag].status = PPR_SUCCESS;
}
spin_unlock_irqrestore(&pasid_state->lock, flags);
}
static void do_fault(struct work_struct *work)
{
struct fault *fault = container_of(work, struct fault, work);
int npages, write;
struct page *page;
write = !!(fault->flags & PPR_FAULT_WRITE);
npages = get_user_pages(fault->state->task, fault->state->mm,
fault->address, 1, write, 0, &page, NULL);
if (npages == 1) {
put_page(page);
} else if (fault->dev_state->inv_ppr_cb) {
int status;
status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
fault->pasid,
fault->address,
fault->flags);
switch (status) {
case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
break;
case AMD_IOMMU_INV_PRI_RSP_INVALID:
set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
break;
case AMD_IOMMU_INV_PRI_RSP_FAIL:
set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
break;
default:
BUG();
}
} else {
set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
}
finish_pri_tag(fault->dev_state, fault->state, fault->tag);
put_pasid_state(fault->state);
kfree(fault);
}
static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
struct amd_iommu_fault *iommu_fault;
struct pasid_state *pasid_state;
struct device_state *dev_state;
unsigned long flags;
struct fault *fault;
bool finish;
u16 tag;
int ret;
iommu_fault = data;
tag = iommu_fault->tag & 0x1ff;
finish = (iommu_fault->tag >> 9) & 1;
ret = NOTIFY_DONE;
dev_state = get_device_state(iommu_fault->device_id);
if (dev_state == NULL)
goto out;
pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
if (pasid_state == NULL) {
/* We know the device but not the PASID -> send INVALID */
amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
PPR_INVALID, tag);
goto out_drop_state;
}
spin_lock_irqsave(&pasid_state->lock, flags);
atomic_inc(&pasid_state->pri[tag].inflight);
if (finish)
pasid_state->pri[tag].finish = true;
spin_unlock_irqrestore(&pasid_state->lock, flags);
fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
if (fault == NULL) {
/* We are OOM - send success and let the device re-fault */
finish_pri_tag(dev_state, pasid_state, tag);
goto out_drop_state;
}
fault->dev_state = dev_state;
fault->address = iommu_fault->address;
fault->state = pasid_state;
fault->tag = tag;
fault->finish = finish;
fault->flags = iommu_fault->flags;
INIT_WORK(&fault->work, do_fault);
queue_work(iommu_wq, &fault->work);
ret = NOTIFY_OK;
out_drop_state:
put_device_state(dev_state);
out:
return ret;
}
static struct notifier_block ppr_nb = {
.notifier_call = ppr_notifier,
};
static int task_exit(struct notifier_block *nb, unsigned long e, void *data)
{
struct pasid_state *pasid_state;
struct task_struct *task;
task = data;
/*
* Using this notifier is a hack - but there is no other choice
* at the moment. What I really want is a sleeping notifier that
* is called when an MM goes down. But such a notifier doesn't
* exist yet. The notifier needs to sleep because it has to make
* sure that the device does not use the PASID and the address
* space anymore before it is destroyed. This includes waiting
* for pending PRI requests to pass the workqueue. The
* MMU-Notifiers would be a good fit, but they use RCU and so
* they are not allowed to sleep. Let's see how we can solve this
* in a more intelligent way in the future.
*/
again:
spin_lock(&ps_lock);
list_for_each_entry(pasid_state, &pasid_state_list, list) {
struct device_state *dev_state;
int pasid;
if (pasid_state->task != task)
continue;
/* Drop Lock and unbind */
spin_unlock(&ps_lock);
dev_state = pasid_state->device_state;
pasid = pasid_state->pasid;
if (pasid_state->device_state->inv_ctx_cb)
dev_state->inv_ctx_cb(dev_state->pdev, pasid);
unbind_pasid(dev_state, pasid);
/* Task may be in the list multiple times */
goto again;
}
spin_unlock(&ps_lock);
return NOTIFY_OK;
}
int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
struct task_struct *task)
{
struct pasid_state *pasid_state;
struct device_state *dev_state;
u16 devid;
int ret;
might_sleep();
if (!amd_iommu_v2_supported())
return -ENODEV;
devid = device_id(pdev);
dev_state = get_device_state(devid);
if (dev_state == NULL)
return -EINVAL;
ret = -EINVAL;
if (pasid < 0 || pasid >= dev_state->max_pasids)
goto out;
ret = -ENOMEM;
pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
if (pasid_state == NULL)
goto out;
atomic_set(&pasid_state->count, 1);
init_waitqueue_head(&pasid_state->wq);
spin_lock_init(&pasid_state->lock);
pasid_state->task = task;
pasid_state->mm = get_task_mm(task);
pasid_state->device_state = dev_state;
pasid_state->pasid = pasid;
pasid_state->mn.ops = &iommu_mn;
if (pasid_state->mm == NULL)
goto out_free;
mmu_notifier_register(&pasid_state->mn, pasid_state->mm);
ret = set_pasid_state(dev_state, pasid_state, pasid);
if (ret)
goto out_unregister;
ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
__pa(pasid_state->mm->pgd));
if (ret)
goto out_clear_state;
link_pasid_state(pasid_state);
return 0;
out_clear_state:
clear_pasid_state(dev_state, pasid);
out_unregister:
mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
out_free:
free_pasid_state(pasid_state);
out:
put_device_state(dev_state);
return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);
void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
struct device_state *dev_state;
u16 devid;
might_sleep();
if (!amd_iommu_v2_supported())
return;
devid = device_id(pdev);
dev_state = get_device_state(devid);
if (dev_state == NULL)
return;
if (pasid < 0 || pasid >= dev_state->max_pasids)
goto out;
unbind_pasid(dev_state, pasid);
out:
put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);
int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
struct device_state *dev_state;
unsigned long flags;
int ret, tmp;
u16 devid;
might_sleep();
if (!amd_iommu_v2_supported())
return -ENODEV;
if (pasids <= 0 || pasids > (PASID_MASK + 1))
return -EINVAL;
devid = device_id(pdev);
dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
if (dev_state == NULL)
return -ENOMEM;
spin_lock_init(&dev_state->lock);
init_waitqueue_head(&dev_state->wq);
dev_state->pdev = pdev;
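/* One table level covers 9 bits of PASID space; add a level for each
 * additional 9 bits, e.g. pasids <= 512 needs 0 extra levels, up to
 * 512 * 512 needs 1. */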
tmp = pasids;
for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
dev_state->pasid_levels += 1;
atomic_set(&dev_state->count, 1);
dev_state->max_pasids = pasids;
ret = -ENOMEM;
dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
if (dev_state->states == NULL)
goto out_free_dev_state;
dev_state->domain = iommu_domain_alloc(&pci_bus_type);
if (dev_state->domain == NULL)
goto out_free_states;
amd_iommu_domain_direct_map(dev_state->domain);
ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
if (ret)
goto out_free_domain;
ret = iommu_attach_device(dev_state->domain, &pdev->dev);
if (ret != 0)
goto out_free_domain;
spin_lock_irqsave(&state_lock, flags);
if (state_table[devid] != NULL) {
spin_unlock_irqrestore(&state_lock, flags);
ret = -EBUSY;
goto out_free_domain;
}
state_table[devid] = dev_state;
spin_unlock_irqrestore(&state_lock, flags);
return 0;
out_free_domain:
iommu_domain_free(dev_state->domain);
out_free_states:
free_page((unsigned long)dev_state->states);
out_free_dev_state:
kfree(dev_state);
return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);
void amd_iommu_free_device(struct pci_dev *pdev)
{
struct device_state *dev_state;
unsigned long flags;
u16 devid;
if (!amd_iommu_v2_supported())
return;
devid = device_id(pdev);
spin_lock_irqsave(&state_lock, flags);
dev_state = state_table[devid];
if (dev_state == NULL) {
spin_unlock_irqrestore(&state_lock, flags);
return;
}
state_table[devid] = NULL;
spin_unlock_irqrestore(&state_lock, flags);
/* Get rid of any remaining pasid states */
free_pasid_states(dev_state);
put_device_state_wait(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
amd_iommu_invalid_ppr_cb cb)
{
struct device_state *dev_state;
unsigned long flags;
u16 devid;
int ret;
if (!amd_iommu_v2_supported())
return -ENODEV;
devid = device_id(pdev);
spin_lock_irqsave(&state_lock, flags);
ret = -EINVAL;
dev_state = state_table[devid];
if (dev_state == NULL)
goto out_unlock;
dev_state->inv_ppr_cb = cb;
ret = 0;
out_unlock:
spin_unlock_irqrestore(&state_lock, flags);
return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
amd_iommu_invalidate_ctx cb)
{
struct device_state *dev_state;
unsigned long flags;
u16 devid;
int ret;
if (!amd_iommu_v2_supported())
return -ENODEV;
devid = device_id(pdev);
spin_lock_irqsave(&state_lock, flags);
ret = -EINVAL;
dev_state = state_table[devid];
if (dev_state == NULL)
goto out_unlock;
dev_state->inv_ctx_cb = cb;
ret = 0;
out_unlock:
spin_unlock_irqrestore(&state_lock, flags);
return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
static int __init amd_iommu_v2_init(void)
{
size_t state_table_size;
int ret;
pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");
if (!amd_iommu_v2_supported()) {
pr_info("AMD IOMMUv2 functionality not available on this sytem\n");
/*
* Load anyway to provide the symbols to other modules
* which may use AMD IOMMUv2 optionally.
*/
return 0;
}
spin_lock_init(&state_lock);
state_table_size = MAX_DEVICES * sizeof(struct device_state *);
state_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(state_table_size));
if (state_table == NULL)
return -ENOMEM;
ret = -ENOMEM;
iommu_wq = create_workqueue("amd_iommu_v2");
if (iommu_wq == NULL)
goto out_free;
ret = -ENOMEM;
empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
if (empty_page_table == NULL)
goto out_destroy_wq;
amd_iommu_register_ppr_notifier(&ppr_nb);
profile_event_register(PROFILE_TASK_EXIT, &profile_nb);
return 0;
out_destroy_wq:
destroy_workqueue(iommu_wq);
out_free:
free_pages((unsigned long)state_table, get_order(state_table_size));
return ret;
}
static void __exit amd_iommu_v2_exit(void)
{
struct device_state *dev_state;
size_t state_table_size;
int i;
if (!amd_iommu_v2_supported())
return;
profile_event_unregister(PROFILE_TASK_EXIT, &profile_nb);
amd_iommu_unregister_ppr_notifier(&ppr_nb);
flush_workqueue(iommu_wq);
/*
* The loop below might call flush_workqueue(), so call
* destroy_workqueue() after it
*/
for (i = 0; i < MAX_DEVICES; ++i) {
dev_state = get_device_state(i);
if (dev_state == NULL)
continue;
WARN_ON_ONCE(1);
put_device_state(dev_state);
amd_iommu_free_device(dev_state->pdev);
}
destroy_workqueue(iommu_wq);
state_table_size = MAX_DEVICES * sizeof(struct device_state *);
free_pages((unsigned long)state_table, get_order(state_table_size));
free_page((unsigned long)empty_page_table);
}
module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);
| gpl-2.0 |
KutuSystems/linux | drivers/input/touchscreen/wm9712.c | 1925 | 12199 | /*
* wm9712.c -- Codec driver for Wolfson WM9712 AC97 Codecs.
*
* Copyright 2003, 2004, 2005, 2006, 2007 Wolfson Microelectronics PLC.
* Author: Liam Girdwood <lrg@slimlogic.co.uk>
* Parts Copyright : Ian Molton <spyro@f2s.com>
* Andrew Zabolotny <zap@homelink.ru>
* Russell King <rmk@arm.linux.org.uk>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/wm97xx.h>
#define TS_NAME "wm97xx"
#define WM9712_VERSION "1.00"
#define DEFAULT_PRESSURE 0xb0c0
/*
* Module parameters
*/
/*
* Set internal pull up for pen detect.
*
* Pull up is in the range 1.02k (least sensitive) to 64k (most sensitive)
* i.e. pull up resistance = 64k Ohms / rpu.
*
* Adjust this value if you are having problems with pen detect not
* detecting any down event.
*/
static int rpu = 8;
module_param(rpu, int, 0);
MODULE_PARM_DESC(rpu, "Set internal pull up resistor for pen detect.");
/*
* Set current used for pressure measurement.
*
* Set pil = 2 to use 400uA
* pil = 1 to use 200uA and
* pil = 0 to disable pressure measurement.
*
* This is used to increase the range of values returned by the adc
* when measuring touchpanel pressure.
*/
static int pil;
module_param(pil, int, 0);
MODULE_PARM_DESC(pil, "Set current used for pressure measurement.");
/*
* Set threshold for pressure measurement.
*
* Pen down pressure below threshold is ignored.
*/
static int pressure = DEFAULT_PRESSURE & 0xfff;
module_param(pressure, int, 0);
MODULE_PARM_DESC(pressure, "Set threshold for pressure measurement.");
/*
* Set adc sample delay.
*
* For accurate touchpanel measurements, some settling time may be
* required between the switch matrix applying a voltage across the
* touchpanel plate and the ADC sampling the signal.
*
* This delay can be set by setting delay = n, where n is the array
* position of the delay in the array delay_table below.
* Long delays > 1ms are supported for completeness, but are not
* recommended.
*/
static int delay = 3;
module_param(delay, int, 0);
MODULE_PARM_DESC(delay, "Set adc sample delay.");
/*
* Set five_wire = 1 to use a 5 wire touchscreen.
*
* NOTE: Five wire mode does not allow for readback of pressure.
*/
static int five_wire;
module_param(five_wire, int, 0);
MODULE_PARM_DESC(five_wire, "Set to '1' to use 5-wire touchscreen.");
/*
* Set adc mask function.
*
* Sources of glitch noise, such as signals driving an LCD display, may feed
* through to the touch screen plates and affect measurement accuracy. In
* order to minimise this, a signal may be applied to the MASK pin to delay or
* synchronise the sampling.
*
* 0 = No delay or sync
* 1 = High on pin stops conversions
* 2 = Edge triggered, edge on pin delays conversion by delay param (above)
* 3 = Edge triggered, edge on pin starts conversion after delay param
*/
static int mask;
module_param(mask, int, 0);
MODULE_PARM_DESC(mask, "Set adc mask function.");
/*
* Coordinate Polling Enable.
*
* Set to 1 to enable coordinate polling, i.e. x,y[,p] are sampled together
* on every poll.
*/
static int coord;
module_param(coord, int, 0);
MODULE_PARM_DESC(coord, "Polling coordinate mode");
/*
* ADC sample delay times in uS
*/
static const int delay_table[] = {
21, /* 1 AC97 Link frames */
42, /* 2 */
84, /* 4 */
167, /* 8 */
333, /* 16 */
667, /* 32 */
1000, /* 48 */
1333, /* 64 */
2000, /* 96 */
2667, /* 128 */
3333, /* 160 */
4000, /* 192 */
4667, /* 224 */
5333, /* 256 */
6000, /* 288 */
0 /* No delay, switch matrix always on */
};
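/*
 * An AC97 link frame lasts roughly 20.8us (48 kHz frame rate), which is
 * where the ~21us-per-frame granularity of the table above comes from.
 */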
/*
* Delay after issuing a POLL command.
*
* The delay is 3 AC97 link frames + the touchpanel settling delay
*/
static inline void poll_delay(int d)
{
udelay(3 * AC97_LINK_FRAME + delay_table[d]);
}
/*
* set up the physical settings of the WM9712
*/
static void wm9712_phy_init(struct wm97xx *wm)
{
u16 dig1 = 0;
u16 dig2 = WM97XX_RPR | WM9712_RPU(1);
/* WM9712 rpu */
if (rpu) {
dig2 &= 0xffc0;
dig2 |= WM9712_RPU(rpu);
dev_dbg(wm->dev, "setting pen detect pull-up to %d Ohms\n",
64000 / rpu);
}
/* WM9712 five wire */
if (five_wire) {
dig2 |= WM9712_45W;
dev_dbg(wm->dev, "setting 5-wire touchscreen mode.\n");
if (pil) {
dev_warn(wm->dev, "pressure measurement is not "
"supported in 5-wire mode\n");
pil = 0;
}
}
/* touchpanel pressure current*/
if (pil == 2) {
dig2 |= WM9712_PIL;
dev_dbg(wm->dev,
"setting pressure measurement current to 400uA.\n");
} else if (pil)
dev_dbg(wm->dev,
"setting pressure measurement current to 200uA.\n");
if (!pil)
pressure = 0;
/* polling mode sample settling delay */
if (delay < 0 || delay > 15) {
dev_dbg(wm->dev, "supplied delay out of range.\n");
delay = 4;
}
dig1 &= 0xff0f;
dig1 |= WM97XX_DELAY(delay);
dev_dbg(wm->dev, "setting adc sample delay to %d u Secs.\n",
delay_table[delay]);
/* mask */
dig2 |= ((mask & 0x3) << 6);
if (mask) {
u16 reg;
/* Set GPIO4 as Mask Pin*/
reg = wm97xx_reg_read(wm, AC97_MISC_AFE);
wm97xx_reg_write(wm, AC97_MISC_AFE, reg | WM97XX_GPIO_4);
reg = wm97xx_reg_read(wm, AC97_GPIO_CFG);
wm97xx_reg_write(wm, AC97_GPIO_CFG, reg | WM97XX_GPIO_4);
}
/* wait - coord mode */
if (coord)
dig2 |= WM9712_WAIT;
wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, dig1);
wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER2, dig2);
}
static void wm9712_dig_enable(struct wm97xx *wm, int enable)
{
u16 dig2 = wm->dig[2];
if (enable) {
wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER2,
dig2 | WM97XX_PRP_DET_DIG);
wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); /* dummy read */
} else
wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER2,
dig2 & ~WM97XX_PRP_DET_DIG);
}
static void wm9712_aux_prepare(struct wm97xx *wm)
{
memcpy(wm->dig_save, wm->dig, sizeof(wm->dig));
wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, 0);
wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER2, WM97XX_PRP_DET_DIG);
}
static void wm9712_dig_restore(struct wm97xx *wm)
{
wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, wm->dig_save[1]);
wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER2, wm->dig_save[2]);
}
static inline int is_pden(struct wm97xx *wm)
{
return wm->dig[2] & WM9712_PDEN;
}
/*
* Read a sample from the WM9712 adc in polling mode.
*/
static int wm9712_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
{
int timeout = 5 * delay;
bool wants_pen = adcsel & WM97XX_PEN_DOWN;
if (wants_pen && !wm->pen_probably_down) {
u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
if (!(data & WM97XX_PEN_DOWN))
return RC_PENUP;
wm->pen_probably_down = 1;
}
/* set up digitiser */
if (wm->mach_ops && wm->mach_ops->pre_sample)
wm->mach_ops->pre_sample(adcsel);
wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, (adcsel & WM97XX_ADCSEL_MASK)
| WM97XX_POLL | WM97XX_DELAY(delay));
/* wait 3 AC97 time slots + delay for conversion */
poll_delay(delay);
/* wait for POLL to go low */
while ((wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER1) & WM97XX_POLL)
&& timeout) {
udelay(AC97_LINK_FRAME);
timeout--;
}
if (timeout <= 0) {
/* If PDEN is set, we can get a timeout when pen goes up */
if (is_pden(wm))
wm->pen_probably_down = 0;
else
dev_dbg(wm->dev, "adc sample timeout\n");
return RC_PENUP;
}
*sample = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
if (wm->mach_ops && wm->mach_ops->post_sample)
wm->mach_ops->post_sample(adcsel);
/* check we have correct sample */
if ((*sample ^ adcsel) & WM97XX_ADCSEL_MASK) {
dev_dbg(wm->dev, "adc wrong sample, wanted %x got %x\n",
adcsel & WM97XX_ADCSEL_MASK,
*sample & WM97XX_ADCSEL_MASK);
return RC_AGAIN;
}
if (wants_pen && !(*sample & WM97XX_PEN_DOWN)) {
/* Sometimes it reads a wrong value the first time. */
*sample = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
if (!(*sample & WM97XX_PEN_DOWN)) {
wm->pen_probably_down = 0;
return RC_PENUP;
}
}
return RC_VALID;
}
/*
* Read a coord from the WM9712 adc in polling mode.
*/
static int wm9712_poll_coord(struct wm97xx *wm, struct wm97xx_data *data)
{
int timeout = 5 * delay;
if (!wm->pen_probably_down) {
u16 data_rd = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
if (!(data_rd & WM97XX_PEN_DOWN))
return RC_PENUP;
wm->pen_probably_down = 1;
}
/* set up digitiser */
if (wm->mach_ops && wm->mach_ops->pre_sample)
wm->mach_ops->pre_sample(WM97XX_ADCSEL_X | WM97XX_ADCSEL_Y);
wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1,
WM97XX_COO | WM97XX_POLL | WM97XX_DELAY(delay));
/* wait 3 AC97 time slots + delay for conversion and read x */
poll_delay(delay);
data->x = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
/* wait for POLL to go low */
while ((wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER1) & WM97XX_POLL)
&& timeout) {
udelay(AC97_LINK_FRAME);
timeout--;
}
if (timeout <= 0) {
/* If PDEN is set, we can get a timeout when pen goes up */
if (is_pden(wm))
wm->pen_probably_down = 0;
else
dev_dbg(wm->dev, "adc sample timeout\n");
return RC_PENUP;
}
/* read back y data */
data->y = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
if (pil)
data->p = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
else
data->p = DEFAULT_PRESSURE;
if (wm->mach_ops && wm->mach_ops->post_sample)
wm->mach_ops->post_sample(WM97XX_ADCSEL_X | WM97XX_ADCSEL_Y);
/* check we have correct sample */
if (!(data->x & WM97XX_ADCSEL_X) || !(data->y & WM97XX_ADCSEL_Y))
goto err;
if (pil && !(data->p & WM97XX_ADCSEL_PRES))
goto err;
if (!(data->x & WM97XX_PEN_DOWN) || !(data->y & WM97XX_PEN_DOWN)) {
wm->pen_probably_down = 0;
return RC_PENUP;
}
return RC_VALID;
err:
return 0;
}
/*
* Sample the WM9712 touchscreen in polling mode
*/
static int wm9712_poll_touch(struct wm97xx *wm, struct wm97xx_data *data)
{
int rc;
if (coord) {
rc = wm9712_poll_coord(wm, data);
if (rc != RC_VALID)
return rc;
} else {
rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_X | WM97XX_PEN_DOWN,
&data->x);
if (rc != RC_VALID)
return rc;
rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_Y | WM97XX_PEN_DOWN,
&data->y);
if (rc != RC_VALID)
return rc;
if (pil && !five_wire) {
rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_PRES | WM97XX_PEN_DOWN,
&data->p);
if (rc != RC_VALID)
return rc;
} else
data->p = DEFAULT_PRESSURE;
}
return RC_VALID;
}
/*
* Enable WM9712 continuous mode, i.e. touch data is streamed across
* an AC97 slot
*/
static int wm9712_acc_enable(struct wm97xx *wm, int enable)
{
u16 dig1, dig2;
int ret = 0;
dig1 = wm->dig[1];
dig2 = wm->dig[2];
if (enable) {
/* continuous mode */
if (wm->mach_ops->acc_startup) {
ret = wm->mach_ops->acc_startup(wm);
if (ret < 0)
return ret;
}
dig1 &= ~(WM97XX_CM_RATE_MASK | WM97XX_ADCSEL_MASK |
WM97XX_DELAY_MASK | WM97XX_SLT_MASK);
dig1 |= WM97XX_CTC | WM97XX_COO | WM97XX_SLEN |
WM97XX_DELAY(delay) |
WM97XX_SLT(wm->acc_slot) |
WM97XX_RATE(wm->acc_rate);
if (pil)
dig1 |= WM97XX_ADCSEL_PRES;
dig2 |= WM9712_PDEN;
} else {
dig1 &= ~(WM97XX_CTC | WM97XX_COO | WM97XX_SLEN);
dig2 &= ~WM9712_PDEN;
if (wm->mach_ops->acc_shutdown)
wm->mach_ops->acc_shutdown(wm);
}
wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, dig1);
wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER2, dig2);
return 0;
}
struct wm97xx_codec_drv wm9712_codec = {
.id = WM9712_ID2,
.name = "wm9712",
.poll_sample = wm9712_poll_sample,
.poll_touch = wm9712_poll_touch,
.acc_enable = wm9712_acc_enable,
.phy_init = wm9712_phy_init,
.dig_enable = wm9712_dig_enable,
.dig_restore = wm9712_dig_restore,
.aux_prepare = wm9712_aux_prepare,
};
EXPORT_SYMBOL_GPL(wm9712_codec);
/* Module information */
MODULE_AUTHOR("Liam Girdwood <lrg@slimlogic.co.uk>");
MODULE_DESCRIPTION("WM9712 Touch Screen Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Wonfee/android_kernel_asus_grouper | drivers/isdn/hisax/hscx.c | 4229 | 7537 | /* $Id: hscx.c,v 1.24.2.4 2004/01/24 20:47:23 keil Exp $
*
* HSCX specific routines
*
* Author Karsten Keil
* Copyright by Karsten Keil <keil@isdn4linux.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/init.h>
#include "hisax.h"
#include "hscx.h"
#include "isac.h"
#include "isdnl1.h"
#include <linux/interrupt.h>
#include <linux/slab.h>
static char *HSCXVer[] =
{"A1", "?1", "A2", "?3", "A3", "V2.1", "?6", "?7",
"?8", "?9", "?10", "?11", "?12", "?13", "?14", "???"};
int
HscxVersion(struct IsdnCardState *cs, char *s)
{
int verA, verB;
verA = cs->BC_Read_Reg(cs, 0, HSCX_VSTR) & 0xf;
verB = cs->BC_Read_Reg(cs, 1, HSCX_VSTR) & 0xf;
printk(KERN_INFO "%s HSCX version A: %s B: %s\n", s,
HSCXVer[verA], HSCXVer[verB]);
if ((verA == 0) || (verA == 0xf) || (verB == 0) || (verB == 0xf))
return (1);
else
return (0);
}
void
modehscx(struct BCState *bcs, int mode, int bc)
{
struct IsdnCardState *cs = bcs->cs;
int hscx = bcs->hw.hscx.hscx;
if (cs->debug & L1_DEB_HSCX)
debugl1(cs, "hscx %c mode %d ichan %d",
'A' + hscx, mode, bc);
bcs->mode = mode;
bcs->channel = bc;
cs->BC_Write_Reg(cs, hscx, HSCX_XAD1, 0xFF);
cs->BC_Write_Reg(cs, hscx, HSCX_XAD2, 0xFF);
cs->BC_Write_Reg(cs, hscx, HSCX_RAH2, 0xFF);
cs->BC_Write_Reg(cs, hscx, HSCX_XBCH, 0x0);
cs->BC_Write_Reg(cs, hscx, HSCX_RLCR, 0x0);
cs->BC_Write_Reg(cs, hscx, HSCX_CCR1,
test_bit(HW_IPAC, &cs->HW_Flags) ? 0x82 : 0x85);
cs->BC_Write_Reg(cs, hscx, HSCX_CCR2, 0x30);
cs->BC_Write_Reg(cs, hscx, HSCX_XCCR, 7);
cs->BC_Write_Reg(cs, hscx, HSCX_RCCR, 7);
/* Switch IOM 1 SSI */
if (test_bit(HW_IOM1, &cs->HW_Flags) && (hscx == 0))
bc = 1 - bc;
if (bc == 0) {
cs->BC_Write_Reg(cs, hscx, HSCX_TSAX,
test_bit(HW_IOM1, &cs->HW_Flags) ? 0x7 : bcs->hw.hscx.tsaxr0);
cs->BC_Write_Reg(cs, hscx, HSCX_TSAR,
test_bit(HW_IOM1, &cs->HW_Flags) ? 0x7 : bcs->hw.hscx.tsaxr0);
} else {
cs->BC_Write_Reg(cs, hscx, HSCX_TSAX, bcs->hw.hscx.tsaxr1);
cs->BC_Write_Reg(cs, hscx, HSCX_TSAR, bcs->hw.hscx.tsaxr1);
}
switch (mode) {
case (L1_MODE_NULL):
cs->BC_Write_Reg(cs, hscx, HSCX_TSAX, 0x1f);
cs->BC_Write_Reg(cs, hscx, HSCX_TSAR, 0x1f);
cs->BC_Write_Reg(cs, hscx, HSCX_MODE, 0x84);
break;
case (L1_MODE_TRANS):
cs->BC_Write_Reg(cs, hscx, HSCX_MODE, 0xe4);
break;
case (L1_MODE_HDLC):
cs->BC_Write_Reg(cs, hscx, HSCX_CCR1,
test_bit(HW_IPAC, &cs->HW_Flags) ? 0x8a : 0x8d);
cs->BC_Write_Reg(cs, hscx, HSCX_MODE, 0x8c);
break;
}
if (mode)
cs->BC_Write_Reg(cs, hscx, HSCX_CMDR, 0x41);
cs->BC_Write_Reg(cs, hscx, HSCX_ISTA, 0x00);
}
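/*
* Layer-2 to layer-1 primitive dispatcher for one B-channel: PH_DATA
* requests are queued behind a busy transmitter or started immediately,
* while (de)activation requests are passed on to the common B-channel
* state machine via l1_msg_b().
*/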
void
hscx_l2l1(struct PStack *st, int pr, void *arg)
{
struct BCState *bcs = st->l1.bcs;
u_long flags;
struct sk_buff *skb = arg;
switch (pr) {
case (PH_DATA | REQUEST):
spin_lock_irqsave(&bcs->cs->lock, flags);
if (bcs->tx_skb) {
skb_queue_tail(&bcs->squeue, skb);
} else {
bcs->tx_skb = skb;
test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
bcs->hw.hscx.count = 0;
bcs->cs->BC_Send_Data(bcs);
}
spin_unlock_irqrestore(&bcs->cs->lock, flags);
break;
case (PH_PULL | INDICATION):
spin_lock_irqsave(&bcs->cs->lock, flags);
if (bcs->tx_skb) {
printk(KERN_WARNING "hscx_l2l1: this shouldn't happen\n");
} else {
test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
bcs->tx_skb = skb;
bcs->hw.hscx.count = 0;
bcs->cs->BC_Send_Data(bcs);
}
spin_unlock_irqrestore(&bcs->cs->lock, flags);
break;
case (PH_PULL | REQUEST):
if (!bcs->tx_skb) {
test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
} else
test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
break;
case (PH_ACTIVATE | REQUEST):
spin_lock_irqsave(&bcs->cs->lock, flags);
test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
modehscx(bcs, st->l1.mode, st->l1.bc);
spin_unlock_irqrestore(&bcs->cs->lock, flags);
l1_msg_b(st, pr, arg);
break;
case (PH_DEACTIVATE | REQUEST):
l1_msg_b(st, pr, arg);
break;
case (PH_DEACTIVATE | CONFIRM):
spin_lock_irqsave(&bcs->cs->lock, flags);
test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
modehscx(bcs, 0, st->l1.bc);
spin_unlock_irqrestore(&bcs->cs->lock, flags);
st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
break;
}
}
static void
close_hscxstate(struct BCState *bcs)
{
modehscx(bcs, 0, bcs->channel);
if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
kfree(bcs->hw.hscx.rcvbuf);
bcs->hw.hscx.rcvbuf = NULL;
kfree(bcs->blog);
bcs->blog = NULL;
skb_queue_purge(&bcs->rqueue);
skb_queue_purge(&bcs->squeue);
if (bcs->tx_skb) {
dev_kfree_skb_any(bcs->tx_skb);
bcs->tx_skb = NULL;
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
}
}
}
int
open_hscxstate(struct IsdnCardState *cs, struct BCState *bcs)
{
if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
if (!(bcs->hw.hscx.rcvbuf = kmalloc(HSCX_BUFMAX, GFP_ATOMIC))) {
printk(KERN_WARNING
"HiSax: No memory for hscx.rcvbuf\n");
test_and_clear_bit(BC_FLG_INIT, &bcs->Flag);
return (1);
}
if (!(bcs->blog = kmalloc(MAX_BLOG_SPACE, GFP_ATOMIC))) {
printk(KERN_WARNING
"HiSax: No memory for bcs->blog\n");
test_and_clear_bit(BC_FLG_INIT, &bcs->Flag);
kfree(bcs->hw.hscx.rcvbuf);
bcs->hw.hscx.rcvbuf = NULL;
return (2);
}
skb_queue_head_init(&bcs->rqueue);
skb_queue_head_init(&bcs->squeue);
}
bcs->tx_skb = NULL;
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
bcs->event = 0;
bcs->hw.hscx.rcvidx = 0;
bcs->tx_cnt = 0;
return (0);
}
static int
setstack_hscx(struct PStack *st, struct BCState *bcs)
{
bcs->channel = st->l1.bc;
if (open_hscxstate(st->l1.hardware, bcs))
return (-1);
st->l1.bcs = bcs;
st->l2.l2l1 = hscx_l2l1;
setstack_manager(st);
bcs->st = st;
setstack_l1_B(st);
return (0);
}
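/*
* Drain interrupt state left over from before the handler was set up:
* reading ISTA/EXIR acknowledges pending sources on the chip, and the
* final MASK writes disable all HSCX interrupts until init re-enables
* them.
*/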
void
clear_pending_hscx_ints(struct IsdnCardState *cs)
{
int val, eval;
val = cs->BC_Read_Reg(cs, 1, HSCX_ISTA);
debugl1(cs, "HSCX B ISTA %x", val);
if (val & 0x01) {
eval = cs->BC_Read_Reg(cs, 1, HSCX_EXIR);
debugl1(cs, "HSCX B EXIR %x", eval);
}
if (val & 0x02) {
eval = cs->BC_Read_Reg(cs, 0, HSCX_EXIR);
debugl1(cs, "HSCX A EXIR %x", eval);
}
val = cs->BC_Read_Reg(cs, 0, HSCX_ISTA);
debugl1(cs, "HSCX A ISTA %x", val);
val = cs->BC_Read_Reg(cs, 1, HSCX_STAR);
debugl1(cs, "HSCX B STAR %x", val);
val = cs->BC_Read_Reg(cs, 0, HSCX_STAR);
debugl1(cs, "HSCX A STAR %x", val);
/* disable all IRQ */
cs->BC_Write_Reg(cs, 0, HSCX_MASK, 0xFF);
cs->BC_Write_Reg(cs, 1, HSCX_MASK, 0xFF);
}
void
inithscx(struct IsdnCardState *cs)
{
cs->bcs[0].BC_SetStack = setstack_hscx;
cs->bcs[1].BC_SetStack = setstack_hscx;
cs->bcs[0].BC_Close = close_hscxstate;
cs->bcs[1].BC_Close = close_hscxstate;
cs->bcs[0].hw.hscx.hscx = 0;
cs->bcs[1].hw.hscx.hscx = 1;
cs->bcs[0].hw.hscx.tsaxr0 = 0x2f;
cs->bcs[0].hw.hscx.tsaxr1 = 3;
cs->bcs[1].hw.hscx.tsaxr0 = 0x2f;
cs->bcs[1].hw.hscx.tsaxr1 = 3;
modehscx(cs->bcs, 0, 0);
modehscx(cs->bcs + 1, 0, 0);
}
void
inithscxisac(struct IsdnCardState *cs, int part)
{
if (part & 1) {
clear_pending_isac_ints(cs);
clear_pending_hscx_ints(cs);
initisac(cs);
inithscx(cs);
}
if (part & 2) {
/* Reenable all IRQ */
cs->writeisac(cs, ISAC_MASK, 0);
cs->BC_Write_Reg(cs, 0, HSCX_MASK, 0);
cs->BC_Write_Reg(cs, 1, HSCX_MASK, 0);
/* RESET Receiver and Transmitter */
cs->writeisac(cs, ISAC_CMDR, 0x41);
}
}
| gpl-2.0 |
ISTweak/android_kernel_sharp_is14sh | drivers/isdn/hisax/elsa.c | 4229 | 34367 | /* $Id: elsa.c,v 2.32.2.4 2004/01/24 20:47:21 keil Exp $
*
* low level stuff for Elsa isdn cards
*
* Author Karsten Keil
* Copyright by Karsten Keil <keil@isdn4linux.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
* Documentation/isdn/HiSax.cert
*
* Thanks to Elsa GmbH for documents and information
*
* Klaus Lichtenwalder (Klaus.Lichtenwalder@WebForum.DE)
* for ELSA PCMCIA support
*
*/
#include <linux/init.h>
#include <linux/slab.h>
#include "hisax.h"
#include "arcofi.h"
#include "isac.h"
#include "ipac.h"
#include "hscx.h"
#include "isdnl1.h"
#include <linux/pci.h>
#include <linux/isapnp.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
static const char *Elsa_revision = "$Revision: 2.32.2.4 $";
static const char *Elsa_Types[] =
{"None", "PC", "PCC-8", "PCC-16", "PCF", "PCF-Pro",
"PCMCIA", "QS 1000", "QS 3000", "Microlink PCI", "QS 3000 PCI",
"PCMCIA-IPAC" };
static const char *ITACVer[] =
{"?0?", "?1?", "?2?", "?3?", "?4?", "V2.2",
"B1", "A1"};
#define byteout(addr,val) outb(val,addr)
#define bytein(addr) inb(addr)
#define ELSA_ISAC 0
#define ELSA_ISAC_PCM 1
#define ELSA_ITAC 1
#define ELSA_HSCX 2
#define ELSA_ALE 3
#define ELSA_ALE_PCM 4
#define ELSA_CONTROL 4
#define ELSA_CONFIG 5
#define ELSA_START_TIMER 6
#define ELSA_TRIG_IRQ 7
#define ELSA_PC 1
#define ELSA_PCC8 2
#define ELSA_PCC16 3
#define ELSA_PCF 4
#define ELSA_PCFPRO 5
#define ELSA_PCMCIA 6
#define ELSA_QS1000 7
#define ELSA_QS3000 8
#define ELSA_QS1000PCI 9
#define ELSA_QS3000PCI 10
#define ELSA_PCMCIA_IPAC 11
/* PCI stuff */
#define ELSA_PCI_IRQ_MASK 0x04
/* ITAC register addresses (Microlink PC only) */
#define ITAC_SYS 0x34
#define ITAC_ISEN 0x48
#define ITAC_RFIE 0x4A
#define ITAC_XFIE 0x4C
#define ITAC_SCIE 0x4E
#define ITAC_STIE 0x46
/*** ***
*** Macros as commands for the card registers ***
*** (multiple commands are combined by bitwise OR) ***
*** ***/
/* Config register (read) */
#define ELIRQF_TIMER_RUN 0x02 /* bit 1 of the config register */
#define ELIRQF_TIMER_RUN_PCC8 0x01 /* bit 0 of the config register on PCC */
#define ELSA_IRQ_IDX 0x38 /* bits 3,4,5 of the config register */
#define ELSA_IRQ_IDX_PCC8 0x30 /* bits 4,5 of the config register */
#define ELSA_IRQ_IDX_PC 0x0c /* bits 2,3 of the config register */
/* Control register (write) */
#define ELSA_LINE_LED 0x02 /* bit 1: yellow LED */
#define ELSA_STAT_LED 0x08 /* bit 3: green LED */
#define ELSA_ISDN_RESET 0x20 /* bit 5: reset line */
#define ELSA_ENA_TIMER_INT 0x80 /* bit 7: enable timer interrupt */
/* ALE register (read) */
#define ELSA_HW_RELEASE 0x07 /* bits 0-2: hardware revision */
#define ELSA_S0_POWER_BAD 0x08 /* bit 3: S0 bus power missing */
/* Status Flags */
#define ELIRQF_TIMER_AKTIV 1
#define ELSA_BAD_PWR 2
#define ELSA_ASSIGN 4
#define RS_ISR_PASS_LIMIT 256
#define FLG_MODEM_ACTIVE 1
/* IPAC AUX */
#define ELSA_IPAC_LINE_LED 0x40 /* bit 6: yellow LED */
#define ELSA_IPAC_STAT_LED 0x80 /* bit 7: green LED */
#if ARCOFI_USE
static struct arcofi_msg ARCOFI_XOP_F =
{NULL,0,2,{0xa1,0x3f,0,0,0,0,0,0,0,0}}; /* Normal OP */
static struct arcofi_msg ARCOFI_XOP_1 =
{&ARCOFI_XOP_F,0,2,{0xa1,0x31,0,0,0,0,0,0,0,0}}; /* PWR UP */
static struct arcofi_msg ARCOFI_SOP_F =
{&ARCOFI_XOP_1,0,10,{0xa1,0x1f,0x00,0x50,0x10,0x00,0x00,0x80,0x02,0x12}};
static struct arcofi_msg ARCOFI_COP_9 =
{&ARCOFI_SOP_F,0,10,{0xa1,0x29,0x80,0xcb,0xe9,0x88,0x00,0xc8,0xd8,0x80}}; /* RX */
static struct arcofi_msg ARCOFI_COP_8 =
{&ARCOFI_COP_9,0,10,{0xa1,0x28,0x49,0x31,0x8,0x13,0x6e,0x88,0x2a,0x61}}; /* TX */
static struct arcofi_msg ARCOFI_COP_7 =
{&ARCOFI_COP_8,0,4,{0xa1,0x27,0x80,0x80,0,0,0,0,0,0}}; /* GZ */
static struct arcofi_msg ARCOFI_COP_6 =
{&ARCOFI_COP_7,0,6,{0xa1,0x26,0,0,0x82,0x7c,0,0,0,0}}; /* GRL GRH */
static struct arcofi_msg ARCOFI_COP_5 =
{&ARCOFI_COP_6,0,4,{0xa1,0x25,0xbb,0x4a,0,0,0,0,0,0}}; /* GTX */
static struct arcofi_msg ARCOFI_VERSION =
{NULL,1,2,{0xa0,0,0,0,0,0,0,0,0,0}};
static struct arcofi_msg ARCOFI_XOP_0 =
{NULL,0,2,{0xa1,0x30,0,0,0,0,0,0,0,0}}; /* PWR Down */
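/*
* The ARCOFI configuration messages above form a singly linked list via
* their first member: arcofi_fsm() walks the chain, so queueing
* ARCOFI_COP_5 sends COP_5..COP_9, SOP_F, XOP_1 and XOP_F in sequence.
*/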
static void set_arcofi(struct IsdnCardState *cs, int bc);
#include "elsa_ser.c"
#endif /* ARCOFI_USE */
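/*
* All chip accesses are indirect: the register offset is first written
* to the ALE (address latch enable) port, then the data port is read or
* written. The helpers below wrap that two-step sequence for single
* registers and for FIFO transfers.
*/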
static inline u_char
readreg(unsigned int ale, unsigned int adr, u_char off)
{
register u_char ret;
byteout(ale, off);
ret = bytein(adr);
return (ret);
}
static inline void
readfifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size)
{
byteout(ale, off);
insb(adr, data, size);
}
static inline void
writereg(unsigned int ale, unsigned int adr, u_char off, u_char data)
{
byteout(ale, off);
byteout(adr, data);
}
static inline void
writefifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size)
{
byteout(ale, off);
outsb(adr, data, size);
}
/* Interface functions */
static u_char
ReadISAC(struct IsdnCardState *cs, u_char offset)
{
return (readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, offset));
}
static void
WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
{
writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, offset, value);
}
static void
ReadISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
readfifo(cs->hw.elsa.ale, cs->hw.elsa.isac, 0, data, size);
}
static void
WriteISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
writefifo(cs->hw.elsa.ale, cs->hw.elsa.isac, 0, data, size);
}
static u_char
ReadISAC_IPAC(struct IsdnCardState *cs, u_char offset)
{
return (readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, offset+0x80));
}
static void
WriteISAC_IPAC(struct IsdnCardState *cs, u_char offset, u_char value)
{
writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, offset|0x80, value);
}
static void
ReadISACfifo_IPAC(struct IsdnCardState *cs, u_char * data, int size)
{
readfifo(cs->hw.elsa.ale, cs->hw.elsa.isac, 0x80, data, size);
}
static void
WriteISACfifo_IPAC(struct IsdnCardState *cs, u_char * data, int size)
{
writefifo(cs->hw.elsa.ale, cs->hw.elsa.isac, 0x80, data, size);
}
static u_char
ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
{
return (readreg(cs->hw.elsa.ale,
cs->hw.elsa.hscx, offset + (hscx ? 0x40 : 0)));
}
static void
WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
{
writereg(cs->hw.elsa.ale,
cs->hw.elsa.hscx, offset + (hscx ? 0x40 : 0), value);
}
static inline u_char
readitac(struct IsdnCardState *cs, u_char off)
{
register u_char ret;
byteout(cs->hw.elsa.ale, off);
ret = bytein(cs->hw.elsa.itac);
return (ret);
}
static inline void
writeitac(struct IsdnCardState *cs, u_char off, u_char data)
{
byteout(cs->hw.elsa.ale, off);
byteout(cs->hw.elsa.itac, data);
}
static inline int
TimerRun(struct IsdnCardState *cs)
{
register u_char v;
v = bytein(cs->hw.elsa.cfg);
if ((cs->subtyp == ELSA_QS1000) || (cs->subtyp == ELSA_QS3000))
return (0 == (v & ELIRQF_TIMER_RUN));
else if (cs->subtyp == ELSA_PCC8)
return (v & ELIRQF_TIMER_RUN_PCC8);
return (v & ELIRQF_TIMER_RUN);
}
/*
* fast interrupt HSCX stuff goes here
*/
#define READHSCX(cs, nr, reg) readreg(cs->hw.elsa.ale, \
cs->hw.elsa.hscx, reg + (nr ? 0x40 : 0))
#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.elsa.ale, \
cs->hw.elsa.hscx, reg + (nr ? 0x40 : 0), data)
#define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.elsa.ale, \
cs->hw.elsa.hscx, (nr ? 0x40 : 0), ptr, cnt)
#define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.elsa.ale, \
cs->hw.elsa.hscx, (nr ? 0x40 : 0), ptr, cnt)
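/*
* The shared HSCX interrupt code in hscx_irq.c expects the four
* READHSCX/WRITEHSCX/READHSCXFIFO/WRITEHSCXFIFO macros above to be
* defined with the card-specific access method before it is included.
*/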
#include "hscx_irq.c"
static irqreturn_t
elsa_interrupt(int intno, void *dev_id)
{
struct IsdnCardState *cs = dev_id;
u_long flags;
u_char val;
int icnt=5;
if ((cs->typ == ISDN_CTYPE_ELSA_PCMCIA) && (*cs->busy_flag == 1)) {
/* The card tends to generate interrupts while being removed,
which would just crash the kernel. Bail out instead. */
printk(KERN_WARNING "Elsa: card not available!\n");
return IRQ_NONE;
}
spin_lock_irqsave(&cs->lock, flags);
#if ARCOFI_USE
if (cs->hw.elsa.MFlag) {
val = serial_inp(cs, UART_IIR);
if (!(val & UART_IIR_NO_INT)) {
debugl1(cs,"IIR %02x", val);
rs_interrupt_elsa(cs);
}
}
#endif
val = readreg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_ISTA + 0x40);
Start_HSCX:
if (val) {
hscx_int_main(cs, val);
}
val = readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, ISAC_ISTA);
Start_ISAC:
if (val) {
isac_interrupt(cs, val);
}
val = readreg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_ISTA + 0x40);
if (val && icnt) {
if (cs->debug & L1_DEB_HSCX)
debugl1(cs, "HSCX IntStat after IntRoutine");
icnt--;
goto Start_HSCX;
}
val = readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, ISAC_ISTA);
if (val && icnt) {
if (cs->debug & L1_DEB_ISAC)
debugl1(cs, "ISAC IntStat after IntRoutine");
icnt--;
goto Start_ISAC;
}
if (!icnt)
printk(KERN_WARNING"ELSA IRQ LOOP\n");
writereg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_MASK, 0xFF);
writereg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_MASK + 0x40, 0xFF);
writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, ISAC_MASK, 0xFF);
if (cs->hw.elsa.status & ELIRQF_TIMER_AKTIV) {
if (!TimerRun(cs)) {
/* Timer Restart */
byteout(cs->hw.elsa.timer, 0);
cs->hw.elsa.counter++;
}
}
#if ARCOFI_USE
if (cs->hw.elsa.MFlag) {
val = serial_inp(cs, UART_MCR);
val ^= 0x8;
serial_outp(cs, UART_MCR, val);
val = serial_inp(cs, UART_MCR);
val ^= 0x8;
serial_outp(cs, UART_MCR, val);
}
#endif
if (cs->hw.elsa.trig)
byteout(cs->hw.elsa.trig, 0x00);
writereg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_MASK, 0x0);
writereg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_MASK + 0x40, 0x0);
writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, ISAC_MASK, 0x0);
spin_unlock_irqrestore(&cs->lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t
elsa_interrupt_ipac(int intno, void *dev_id)
{
struct IsdnCardState *cs = dev_id;
u_long flags;
u_char ista,val;
int icnt=5;
spin_lock_irqsave(&cs->lock, flags);
if (cs->subtyp == ELSA_QS1000PCI || cs->subtyp == ELSA_QS3000PCI) {
val = bytein(cs->hw.elsa.cfg + 0x4c); /* PCI IRQ */
if (!(val & ELSA_PCI_IRQ_MASK)) {
spin_unlock_irqrestore(&cs->lock, flags);
return IRQ_NONE;
}
}
#if ARCOFI_USE
if (cs->hw.elsa.MFlag) {
val = serial_inp(cs, UART_IIR);
if (!(val & UART_IIR_NO_INT)) {
debugl1(cs,"IIR %02x", val);
rs_interrupt_elsa(cs);
}
}
#endif
ista = readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ISTA);
Start_IPAC:
if (cs->debug & L1_DEB_IPAC)
debugl1(cs, "IPAC ISTA %02X", ista);
if (ista & 0x0f) {
val = readreg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_ISTA + 0x40);
if (ista & 0x01)
val |= 0x01;
if (ista & 0x04)
val |= 0x02;
if (ista & 0x08)
val |= 0x04;
if (val)
hscx_int_main(cs, val);
}
if (ista & 0x20) {
val = 0xfe & readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, ISAC_ISTA + 0x80);
if (val) {
isac_interrupt(cs, val);
}
}
if (ista & 0x10) {
val = 0x01;
isac_interrupt(cs, val);
}
ista = readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ISTA);
if ((ista & 0x3f) && icnt) {
icnt--;
goto Start_IPAC;
}
if (!icnt)
printk(KERN_WARNING "ELSA IRQ LOOP\n");
writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_MASK, 0xFF);
writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_MASK, 0xC0);
spin_unlock_irqrestore(&cs->lock, flags);
return IRQ_HANDLED;
}
static void
release_io_elsa(struct IsdnCardState *cs)
{
int bytecnt = 8;
del_timer(&cs->hw.elsa.tl);
#if ARCOFI_USE
clear_arcofi(cs);
#endif
if (cs->hw.elsa.ctrl)
byteout(cs->hw.elsa.ctrl, 0); /* LEDs Out */
if (cs->subtyp == ELSA_QS1000PCI) {
byteout(cs->hw.elsa.cfg + 0x4c, 0x01); /* disable IRQ */
writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ATX, 0xff);
bytecnt = 2;
release_region(cs->hw.elsa.cfg, 0x80);
}
if (cs->subtyp == ELSA_QS3000PCI) {
byteout(cs->hw.elsa.cfg + 0x4c, 0x03); /* disable ELSA PCI IRQ */
writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ATX, 0xff);
release_region(cs->hw.elsa.cfg, 0x80);
}
if (cs->subtyp == ELSA_PCMCIA_IPAC) {
writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ATX, 0xff);
}
if ((cs->subtyp == ELSA_PCFPRO) ||
(cs->subtyp == ELSA_QS3000) ||
(cs->subtyp == ELSA_PCF) ||
(cs->subtyp == ELSA_QS3000PCI)) {
bytecnt = 16;
#if ARCOFI_USE
release_modem(cs);
#endif
}
if (cs->hw.elsa.base)
release_region(cs->hw.elsa.base, bytecnt);
}
static void
reset_elsa(struct IsdnCardState *cs)
{
if (cs->hw.elsa.timer) {
/* Wait 1 Timer */
byteout(cs->hw.elsa.timer, 0);
while (TimerRun(cs));
cs->hw.elsa.ctrl_reg |= 0x50;
cs->hw.elsa.ctrl_reg &= ~ELSA_ISDN_RESET; /* Reset On */
byteout(cs->hw.elsa.ctrl, cs->hw.elsa.ctrl_reg);
/* Wait 1 Timer */
byteout(cs->hw.elsa.timer, 0);
while (TimerRun(cs));
cs->hw.elsa.ctrl_reg |= ELSA_ISDN_RESET; /* Reset Off */
byteout(cs->hw.elsa.ctrl, cs->hw.elsa.ctrl_reg);
/* Wait 1 Timer */
byteout(cs->hw.elsa.timer, 0);
while (TimerRun(cs));
if (cs->hw.elsa.trig)
byteout(cs->hw.elsa.trig, 0xff);
}
if ((cs->subtyp == ELSA_QS1000PCI) || (cs->subtyp == ELSA_QS3000PCI) || (cs->subtyp == ELSA_PCMCIA_IPAC)) {
writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_POTA2, 0x20);
mdelay(10);
writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_POTA2, 0x00);
writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_MASK, 0xc0);
mdelay(10);
if (cs->subtyp != ELSA_PCMCIA_IPAC) {
writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ACFG, 0x0);
writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_AOE, 0x3c);
} else {
writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_PCFG, 0x10);
writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ACFG, 0x4);
writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_AOE, 0xf8);
}
writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ATX, 0xff);
if (cs->subtyp == ELSA_QS1000PCI)
byteout(cs->hw.elsa.cfg + 0x4c, 0x41); /* enable ELSA PCI IRQ */
else if (cs->subtyp == ELSA_QS3000PCI)
byteout(cs->hw.elsa.cfg + 0x4c, 0x43); /* enable ELSA PCI IRQ */
}
}
#if ARCOFI_USE
static void
set_arcofi(struct IsdnCardState *cs, int bc) {
cs->dc.isac.arcofi_bc = bc;
arcofi_fsm(cs, ARCOFI_START, &ARCOFI_COP_5);
interruptible_sleep_on(&cs->dc.isac.arcofi_wait);
}
static int
check_arcofi(struct IsdnCardState *cs)
{
int arcofi_present = 0;
char tmp[40];
char *t;
u_char *p;
if (!cs->dc.isac.mon_tx)
if (!(cs->dc.isac.mon_tx=kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) {
if (cs->debug & L1_DEB_WARN)
debugl1(cs, "ISAC MON TX out of buffers!");
return(0);
}
cs->dc.isac.arcofi_bc = 0;
arcofi_fsm(cs, ARCOFI_START, &ARCOFI_VERSION);
interruptible_sleep_on(&cs->dc.isac.arcofi_wait);
if (!test_and_clear_bit(FLG_ARCOFI_ERROR, &cs->HW_Flags)) {
debugl1(cs, "Arcofi response received %d bytes", cs->dc.isac.mon_rxp);
p = cs->dc.isac.mon_rx;
t = tmp;
t += sprintf(tmp, "Arcofi data");
QuickHex(t, p, cs->dc.isac.mon_rxp);
debugl1(cs, tmp);
if ((cs->dc.isac.mon_rxp == 2) && (cs->dc.isac.mon_rx[0] == 0xa0)) {
switch(cs->dc.isac.mon_rx[1]) {
case 0x80:
debugl1(cs, "Arcofi 2160 detected");
arcofi_present = 1;
break;
case 0x82:
debugl1(cs, "Arcofi 2165 detected");
arcofi_present = 2;
break;
case 0x84:
debugl1(cs, "Arcofi 2163 detected");
arcofi_present = 3;
break;
default:
debugl1(cs, "unknown Arcofi response");
break;
}
} else
debugl1(cs, "undefined Monitor response");
cs->dc.isac.mon_rxp = 0;
} else if (cs->dc.isac.mon_tx) {
debugl1(cs, "Arcofi not detected");
}
if (arcofi_present) {
if (cs->subtyp==ELSA_QS1000) {
cs->subtyp = ELSA_QS3000;
printk(KERN_INFO
"Elsa: %s detected modem at 0x%lx\n",
Elsa_Types[cs->subtyp],
cs->hw.elsa.base+8);
release_region(cs->hw.elsa.base, 8);
if (!request_region(cs->hw.elsa.base, 16, "elsa isdn modem")) {
printk(KERN_WARNING
"HiSax: %s config port %lx-%lx already in use\n",
Elsa_Types[cs->subtyp],
cs->hw.elsa.base + 8,
cs->hw.elsa.base + 16);
}
} else if (cs->subtyp==ELSA_PCC16) {
cs->subtyp = ELSA_PCF;
printk(KERN_INFO
"Elsa: %s detected modem at 0x%lx\n",
Elsa_Types[cs->subtyp],
cs->hw.elsa.base+8);
release_region(cs->hw.elsa.base, 8);
if (!request_region(cs->hw.elsa.base, 16, "elsa isdn modem")) {
printk(KERN_WARNING
"HiSax: %s config port %lx-%lx already in use\n",
Elsa_Types[cs->subtyp],
cs->hw.elsa.base + 8,
cs->hw.elsa.base + 16);
}
} else
printk(KERN_INFO
"Elsa: %s detected modem at 0x%lx\n",
Elsa_Types[cs->subtyp],
cs->hw.elsa.base+8);
arcofi_fsm(cs, ARCOFI_START, &ARCOFI_XOP_0);
interruptible_sleep_on(&cs->dc.isac.arcofi_wait);
return(1);
}
return(0);
}
#endif /* ARCOFI_USE */
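/*
* LED state machine, re-armed through the cs->hw.elsa.tl timer: the
* green status LED blinks every 250 ms until the card is assigned, the
* yellow line LED blinks every 500 ms during call setup and goes steady
* once connected. PCI variants drive the LEDs through the IPAC ATX
* register instead of the control port.
*/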
static void
elsa_led_handler(struct IsdnCardState *cs)
{
int blink = 0;
if (cs->subtyp == ELSA_PCMCIA || cs->subtyp == ELSA_PCMCIA_IPAC)
return;
del_timer(&cs->hw.elsa.tl);
if (cs->hw.elsa.status & ELSA_ASSIGN)
cs->hw.elsa.ctrl_reg |= ELSA_STAT_LED;
else if (cs->hw.elsa.status & ELSA_BAD_PWR)
cs->hw.elsa.ctrl_reg &= ~ELSA_STAT_LED;
else {
cs->hw.elsa.ctrl_reg ^= ELSA_STAT_LED;
blink = 250;
}
if (cs->hw.elsa.status & 0xf000)
cs->hw.elsa.ctrl_reg |= ELSA_LINE_LED;
else if (cs->hw.elsa.status & 0x0f00) {
cs->hw.elsa.ctrl_reg ^= ELSA_LINE_LED;
blink = 500;
} else
cs->hw.elsa.ctrl_reg &= ~ELSA_LINE_LED;
if ((cs->subtyp == ELSA_QS1000PCI) ||
(cs->subtyp == ELSA_QS3000PCI)) {
u_char led = 0xff;
if (cs->hw.elsa.ctrl_reg & ELSA_LINE_LED)
led ^= ELSA_IPAC_LINE_LED;
if (cs->hw.elsa.ctrl_reg & ELSA_STAT_LED)
led ^= ELSA_IPAC_STAT_LED;
writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ATX, led);
} else
byteout(cs->hw.elsa.ctrl, cs->hw.elsa.ctrl_reg);
if (blink) {
init_timer(&cs->hw.elsa.tl);
cs->hw.elsa.tl.expires = jiffies + ((blink * HZ) / 1000);
add_timer(&cs->hw.elsa.tl);
}
}
static int
Elsa_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
int ret = 0;
u_long flags;
switch (mt) {
case CARD_RESET:
spin_lock_irqsave(&cs->lock, flags);
reset_elsa(cs);
spin_unlock_irqrestore(&cs->lock, flags);
return(0);
case CARD_RELEASE:
release_io_elsa(cs);
return(0);
case CARD_INIT:
spin_lock_irqsave(&cs->lock, flags);
cs->debug |= L1_DEB_IPAC;
reset_elsa(cs);
inithscxisac(cs, 1);
if ((cs->subtyp == ELSA_QS1000) ||
(cs->subtyp == ELSA_QS3000))
{
byteout(cs->hw.elsa.timer, 0);
}
if (cs->hw.elsa.trig)
byteout(cs->hw.elsa.trig, 0xff);
inithscxisac(cs, 2);
spin_unlock_irqrestore(&cs->lock, flags);
return(0);
case CARD_TEST:
if ((cs->subtyp == ELSA_PCMCIA) ||
(cs->subtyp == ELSA_PCMCIA_IPAC) ||
(cs->subtyp == ELSA_QS1000PCI)) {
return(0);
} else if (cs->subtyp == ELSA_QS3000PCI) {
ret = 0;
} else {
spin_lock_irqsave(&cs->lock, flags);
cs->hw.elsa.counter = 0;
cs->hw.elsa.ctrl_reg |= ELSA_ENA_TIMER_INT;
cs->hw.elsa.status |= ELIRQF_TIMER_AKTIV;
byteout(cs->hw.elsa.ctrl, cs->hw.elsa.ctrl_reg);
byteout(cs->hw.elsa.timer, 0);
spin_unlock_irqrestore(&cs->lock, flags);
msleep(110);
spin_lock_irqsave(&cs->lock, flags);
cs->hw.elsa.ctrl_reg &= ~ELSA_ENA_TIMER_INT;
byteout(cs->hw.elsa.ctrl, cs->hw.elsa.ctrl_reg);
cs->hw.elsa.status &= ~ELIRQF_TIMER_AKTIV;
spin_unlock_irqrestore(&cs->lock, flags);
printk(KERN_INFO "Elsa: %d timer tics in 110 msek\n",
cs->hw.elsa.counter);
if ((cs->hw.elsa.counter > 10) &&
(cs->hw.elsa.counter < 16)) {
printk(KERN_INFO "Elsa: timer and irq OK\n");
ret = 0;
} else {
printk(KERN_WARNING
"Elsa: timer tic problem (%d/12) maybe an IRQ(%d) conflict\n",
cs->hw.elsa.counter, cs->irq);
ret = 1;
}
}
#if ARCOFI_USE
if (check_arcofi(cs)) {
init_modem(cs);
}
#endif
elsa_led_handler(cs);
return(ret);
case (MDL_REMOVE | REQUEST):
cs->hw.elsa.status = 0;
break;
case (MDL_ASSIGN | REQUEST):
cs->hw.elsa.status |= ELSA_ASSIGN;
break;
case MDL_INFO_SETUP:
if ((long) arg)
cs->hw.elsa.status |= 0x0200;
else
cs->hw.elsa.status |= 0x0100;
break;
case MDL_INFO_CONN:
if ((long) arg)
cs->hw.elsa.status |= 0x2000;
else
cs->hw.elsa.status |= 0x1000;
break;
case MDL_INFO_REL:
if ((long) arg) {
cs->hw.elsa.status &= ~0x2000;
cs->hw.elsa.status &= ~0x0200;
} else {
cs->hw.elsa.status &= ~0x1000;
cs->hw.elsa.status &= ~0x0100;
}
break;
#if ARCOFI_USE
case CARD_AUX_IND:
if (cs->hw.elsa.MFlag) {
int len;
u_char *msg;
if (!arg)
return(0);
msg = arg;
len = *msg;
msg++;
modem_write_cmd(cs, msg, len);
}
break;
#endif
}
if (cs->typ == ISDN_CTYPE_ELSA) {
int pwr = bytein(cs->hw.elsa.ale);
if (pwr & 0x08)
cs->hw.elsa.status |= ELSA_BAD_PWR;
else
cs->hw.elsa.status &= ~ELSA_BAD_PWR;
}
elsa_led_handler(cs);
return(ret);
}
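/*
* ISA autoprobe: each card type has one config-register bit that
* toggles on every read. probe_elsa_adr() reads the register twice per
* loop pass, so one accumulator collects the bit on all 16 passes while
* the other stays 0, and the (sum1 + 1) * (sum2 + 1) products of 65,
* 1025, 33 and 17 identify PCC-16/PCF, PCF-Pro, PCC-8 and PC.
*/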
static unsigned char
probe_elsa_adr(unsigned int adr, int typ)
{
int i, in1, in2, p16_1 = 0, p16_2 = 0, p8_1 = 0, p8_2 = 0, pc_1 = 0,
pc_2 = 0, pfp_1 = 0, pfp_2 = 0;
/* In case of the ELSA PCMCIA card this region is already in use,
reserved for us by the card manager, so we do not check it
here; the check would fail. */
if (typ != ISDN_CTYPE_ELSA_PCMCIA) {
if (request_region(adr, 8, "elsa card")) {
release_region(adr, 8);
} else {
printk(KERN_WARNING
"Elsa: Probing Port 0x%x: already in use\n", adr);
return (0);
}
}
for (i = 0; i < 16; i++) {
in1 = inb(adr + ELSA_CONFIG); /* the type bit toggles */
in2 = inb(adr + ELSA_CONFIG); /* on each access */
p16_1 += 0x04 & in1;
p16_2 += 0x04 & in2;
p8_1 += 0x02 & in1;
p8_2 += 0x02 & in2;
pc_1 += 0x01 & in1;
pc_2 += 0x01 & in2;
pfp_1 += 0x40 & in1;
pfp_2 += 0x40 & in2;
}
printk(KERN_INFO "Elsa: Probing IO 0x%x", adr);
if (65 == ++p16_1 * ++p16_2) {
printk(" PCC-16/PCF found\n");
return (ELSA_PCC16);
} else if (1025 == ++pfp_1 * ++pfp_2) {
printk(" PCF-Pro found\n");
return (ELSA_PCFPRO);
} else if (33 == ++p8_1 * ++p8_2) {
printk(" PCC8 found\n");
return (ELSA_PCC8);
} else if (17 == ++pc_1 * ++pc_2) {
printk(" PC found\n");
return (ELSA_PC);
} else {
printk(" failed\n");
return (0);
}
}
static unsigned int
probe_elsa(struct IsdnCardState *cs)
{
int i;
unsigned int CARD_portlist[] =
{0x160, 0x170, 0x260, 0x360, 0};
for (i = 0; CARD_portlist[i]; i++) {
if ((cs->subtyp = probe_elsa_adr(CARD_portlist[i], cs->typ)))
break;
}
return (CARD_portlist[i]);
}
static int __devinit
setup_elsa_isa(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
u_char val;
cs->hw.elsa.base = card->para[0];
printk(KERN_INFO "Elsa: Microlink IO probing\n");
if (cs->hw.elsa.base) {
if (!(cs->subtyp = probe_elsa_adr(cs->hw.elsa.base,
cs->typ))) {
printk(KERN_WARNING
"Elsa: no Elsa Microlink at %#lx\n",
cs->hw.elsa.base);
return (0);
}
} else
cs->hw.elsa.base = probe_elsa(cs);
if (!cs->hw.elsa.base) {
printk(KERN_WARNING
"No Elsa Microlink found\n");
return (0);
}
cs->hw.elsa.cfg = cs->hw.elsa.base + ELSA_CONFIG;
cs->hw.elsa.ctrl = cs->hw.elsa.base + ELSA_CONTROL;
cs->hw.elsa.ale = cs->hw.elsa.base + ELSA_ALE;
cs->hw.elsa.isac = cs->hw.elsa.base + ELSA_ISAC;
cs->hw.elsa.itac = cs->hw.elsa.base + ELSA_ITAC;
cs->hw.elsa.hscx = cs->hw.elsa.base + ELSA_HSCX;
cs->hw.elsa.trig = cs->hw.elsa.base + ELSA_TRIG_IRQ;
cs->hw.elsa.timer = cs->hw.elsa.base + ELSA_START_TIMER;
val = bytein(cs->hw.elsa.cfg);
if (cs->subtyp == ELSA_PC) {
const u_char CARD_IrqTab[8] =
{7, 3, 5, 9, 0, 0, 0, 0};
cs->irq = CARD_IrqTab[(val & ELSA_IRQ_IDX_PC) >> 2];
} else if (cs->subtyp == ELSA_PCC8) {
const u_char CARD_IrqTab[8] =
{7, 3, 5, 9, 0, 0, 0, 0};
cs->irq = CARD_IrqTab[(val & ELSA_IRQ_IDX_PCC8) >> 4];
} else {
const u_char CARD_IrqTab[8] =
{15, 10, 15, 3, 11, 5, 11, 9};
cs->irq = CARD_IrqTab[(val & ELSA_IRQ_IDX) >> 3];
}
val = bytein(cs->hw.elsa.ale) & ELSA_HW_RELEASE;
if (val < 3)
val |= 8;
val += 'A' - 3;
if (val == 'B' || val == 'C')
val ^= 1;
if ((cs->subtyp == ELSA_PCFPRO) && (val == 'G'))
val = 'C';
printk(KERN_INFO
"Elsa: %s found at %#lx Rev.:%c IRQ %d\n",
Elsa_Types[cs->subtyp],
cs->hw.elsa.base,
val, cs->irq);
val = bytein(cs->hw.elsa.ale) & ELSA_S0_POWER_BAD;
if (val) {
printk(KERN_WARNING
"Elsa: Microlink S0 bus power bad\n");
cs->hw.elsa.status |= ELSA_BAD_PWR;
}
return (1);
}
#ifdef __ISAPNP__
static struct isapnp_device_id elsa_ids[] __devinitdata = {
{ ISAPNP_VENDOR('E', 'L', 'S'), ISAPNP_FUNCTION(0x0133),
ISAPNP_VENDOR('E', 'L', 'S'), ISAPNP_FUNCTION(0x0133),
(unsigned long) "Elsa QS1000" },
{ ISAPNP_VENDOR('E', 'L', 'S'), ISAPNP_FUNCTION(0x0134),
ISAPNP_VENDOR('E', 'L', 'S'), ISAPNP_FUNCTION(0x0134),
(unsigned long) "Elsa QS3000" },
{ 0, }
};
static struct isapnp_device_id *ipid __devinitdata = &elsa_ids[0];
static struct pnp_card *pnp_c __devinitdata = NULL;
#endif /* __ISAPNP__ */
static int __devinit
setup_elsa_isapnp(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
#ifdef __ISAPNP__
if (!card->para[1] && isapnp_present()) {
struct pnp_dev *pnp_d;
while(ipid->card_vendor) {
if ((pnp_c = pnp_find_card(ipid->card_vendor,
ipid->card_device, pnp_c))) {
pnp_d = NULL;
if ((pnp_d = pnp_find_dev(pnp_c,
ipid->vendor, ipid->function, pnp_d))) {
int err;
printk(KERN_INFO "HiSax: %s detected\n",
(char *)ipid->driver_data);
pnp_disable_dev(pnp_d);
err = pnp_activate_dev(pnp_d);
if (err<0) {
printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
__func__, err);
return(0);
}
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
if (!card->para[0] || !card->para[1]) {
printk(KERN_ERR "Elsa PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);
return(0);
}
if (ipid->function == ISAPNP_FUNCTION(0x133))
cs->subtyp = ELSA_QS1000;
else
cs->subtyp = ELSA_QS3000;
break;
} else {
printk(KERN_ERR "Elsa PnP: PnP error card found, no device\n");
return(0);
}
}
ipid++;
pnp_c=NULL;
}
if (!ipid->card_vendor) {
printk(KERN_INFO "Elsa PnP: no ISAPnP card found\n");
return(0);
}
}
#endif /* __ISAPNP__ */
if (card->para[1] && card->para[0]) {
cs->hw.elsa.base = card->para[1];
cs->irq = card->para[0];
if (!cs->subtyp)
cs->subtyp = ELSA_QS1000;
} else {
printk(KERN_ERR "Elsa PnP: no parameter\n");
}
cs->hw.elsa.cfg = cs->hw.elsa.base + ELSA_CONFIG;
cs->hw.elsa.ale = cs->hw.elsa.base + ELSA_ALE;
cs->hw.elsa.isac = cs->hw.elsa.base + ELSA_ISAC;
cs->hw.elsa.hscx = cs->hw.elsa.base + ELSA_HSCX;
cs->hw.elsa.trig = cs->hw.elsa.base + ELSA_TRIG_IRQ;
cs->hw.elsa.timer = cs->hw.elsa.base + ELSA_START_TIMER;
cs->hw.elsa.ctrl = cs->hw.elsa.base + ELSA_CONTROL;
printk(KERN_INFO
"Elsa: %s defined at %#lx IRQ %d\n",
Elsa_Types[cs->subtyp],
cs->hw.elsa.base,
cs->irq);
return (1);
}
static void __devinit
setup_elsa_pcmcia(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
u_char val;
cs->hw.elsa.base = card->para[1];
cs->irq = card->para[0];
val = readreg(cs->hw.elsa.base + 0, cs->hw.elsa.base + 2, IPAC_ID);
if ((val == 1) || (val == 2)) { /* IPAC version 1.1/1.2 */
cs->subtyp = ELSA_PCMCIA_IPAC;
cs->hw.elsa.ale = cs->hw.elsa.base + 0;
cs->hw.elsa.isac = cs->hw.elsa.base + 2;
cs->hw.elsa.hscx = cs->hw.elsa.base + 2;
test_and_set_bit(HW_IPAC, &cs->HW_Flags);
} else {
cs->subtyp = ELSA_PCMCIA;
cs->hw.elsa.ale = cs->hw.elsa.base + ELSA_ALE_PCM;
cs->hw.elsa.isac = cs->hw.elsa.base + ELSA_ISAC_PCM;
cs->hw.elsa.hscx = cs->hw.elsa.base + ELSA_HSCX;
}
cs->hw.elsa.timer = 0;
cs->hw.elsa.trig = 0;
cs->hw.elsa.ctrl = 0;
cs->irq_flags |= IRQF_SHARED;
printk(KERN_INFO
"Elsa: %s defined at %#lx IRQ %d\n",
Elsa_Types[cs->subtyp],
cs->hw.elsa.base,
cs->irq);
}
#ifdef CONFIG_PCI
static struct pci_dev *dev_qs1000 __devinitdata = NULL;
static struct pci_dev *dev_qs3000 __devinitdata = NULL;
static int __devinit
setup_elsa_pci(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
cs->subtyp = 0;
if ((dev_qs1000 = hisax_find_pci_device(PCI_VENDOR_ID_ELSA,
PCI_DEVICE_ID_ELSA_MICROLINK, dev_qs1000))) {
if (pci_enable_device(dev_qs1000))
return(0);
cs->subtyp = ELSA_QS1000PCI;
cs->irq = dev_qs1000->irq;
cs->hw.elsa.cfg = pci_resource_start(dev_qs1000, 1);
cs->hw.elsa.base = pci_resource_start(dev_qs1000, 3);
} else if ((dev_qs3000 = hisax_find_pci_device(PCI_VENDOR_ID_ELSA,
PCI_DEVICE_ID_ELSA_QS3000, dev_qs3000))) {
if (pci_enable_device(dev_qs3000))
return(0);
cs->subtyp = ELSA_QS3000PCI;
cs->irq = dev_qs3000->irq;
cs->hw.elsa.cfg = pci_resource_start(dev_qs3000, 1);
cs->hw.elsa.base = pci_resource_start(dev_qs3000, 3);
} else {
printk(KERN_WARNING "Elsa: No PCI card found\n");
return(0);
}
if (!cs->irq) {
printk(KERN_WARNING "Elsa: No IRQ for PCI card found\n");
return(0);
}
if (!(cs->hw.elsa.base && cs->hw.elsa.cfg)) {
printk(KERN_WARNING "Elsa: No IO-Adr for PCI card found\n");
return(0);
}
if ((cs->hw.elsa.cfg & 0xff) || (cs->hw.elsa.base & 0xf)) {
printk(KERN_WARNING "Elsa: You may have a wrong PCI bios\n");
printk(KERN_WARNING "Elsa: If your system hangs now, read\n");
printk(KERN_WARNING "Elsa: Documentation/isdn/README.HiSax\n");
}
cs->hw.elsa.ale = cs->hw.elsa.base;
cs->hw.elsa.isac = cs->hw.elsa.base +1;
cs->hw.elsa.hscx = cs->hw.elsa.base +1;
test_and_set_bit(HW_IPAC, &cs->HW_Flags);
cs->hw.elsa.timer = 0;
cs->hw.elsa.trig = 0;
cs->irq_flags |= IRQF_SHARED;
printk(KERN_INFO
"Elsa: %s defined at %#lx/0x%x IRQ %d\n",
Elsa_Types[cs->subtyp],
cs->hw.elsa.base,
cs->hw.elsa.cfg,
cs->irq);
return (1);
}
#else
static int __devinit
setup_elsa_pci(struct IsdnCard *card)
{
return (1);
}
#endif /* CONFIG_PCI */
static int __devinit
setup_elsa_common(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
u_char val;
int bytecnt;
switch (cs->subtyp) {
case ELSA_PC:
case ELSA_PCC8:
case ELSA_PCC16:
case ELSA_QS1000:
case ELSA_PCMCIA:
case ELSA_PCMCIA_IPAC:
bytecnt = 8;
break;
case ELSA_PCFPRO:
case ELSA_PCF:
case ELSA_QS3000:
case ELSA_QS3000PCI:
bytecnt = 16;
break;
case ELSA_QS1000PCI:
bytecnt = 2;
break;
default:
printk(KERN_WARNING
"Unknown ELSA subtype %d\n", cs->subtyp);
return (0);
}
/* In case of the ELSA PCMCIA card this region is already in use,
reserved for us by the card manager, so we do not check it
here; the check would fail. */
if (cs->typ != ISDN_CTYPE_ELSA_PCMCIA && !request_region(cs->hw.elsa.base, bytecnt, "elsa isdn")) {
printk(KERN_WARNING
"HiSax: ELSA config port %#lx-%#lx already in use\n",
cs->hw.elsa.base,
cs->hw.elsa.base + bytecnt);
return (0);
}
if ((cs->subtyp == ELSA_QS1000PCI) || (cs->subtyp == ELSA_QS3000PCI)) {
if (!request_region(cs->hw.elsa.cfg, 0x80, "elsa isdn pci")) {
printk(KERN_WARNING
"HiSax: ELSA pci port %x-%x already in use\n",
cs->hw.elsa.cfg,
cs->hw.elsa.cfg + 0x80);
release_region(cs->hw.elsa.base, bytecnt);
return (0);
}
}
#if ARCOFI_USE
init_arcofi(cs);
#endif
setup_isac(cs);
cs->hw.elsa.tl.function = (void *) elsa_led_handler;
cs->hw.elsa.tl.data = (long) cs;
init_timer(&cs->hw.elsa.tl);
/* Test the timer */
if (cs->hw.elsa.timer) {
byteout(cs->hw.elsa.trig, 0xff);
byteout(cs->hw.elsa.timer, 0);
if (!TimerRun(cs)) {
byteout(cs->hw.elsa.timer, 0); /* second attempt */
if (!TimerRun(cs)) {
printk(KERN_WARNING
"Elsa: timer do not start\n");
release_io_elsa(cs);
return (0);
}
}
HZDELAY((HZ/100) + 1); /* wait >=10 ms */
if (TimerRun(cs)) {
printk(KERN_WARNING "Elsa: timer do not run down\n");
release_io_elsa(cs);
return (0);
}
printk(KERN_INFO "Elsa: timer OK; resetting card\n");
}
cs->BC_Read_Reg = &ReadHSCX;
cs->BC_Write_Reg = &WriteHSCX;
cs->BC_Send_Data = &hscx_fill_fifo;
cs->cardmsg = &Elsa_card_msg;
if ((cs->subtyp == ELSA_QS1000PCI) || (cs->subtyp == ELSA_QS3000PCI) || (cs->subtyp == ELSA_PCMCIA_IPAC)) {
cs->readisac = &ReadISAC_IPAC;
cs->writeisac = &WriteISAC_IPAC;
cs->readisacfifo = &ReadISACfifo_IPAC;
cs->writeisacfifo = &WriteISACfifo_IPAC;
cs->irq_func = &elsa_interrupt_ipac;
val = readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ID);
printk(KERN_INFO "Elsa: IPAC version %x\n", val);
} else {
cs->readisac = &ReadISAC;
cs->writeisac = &WriteISAC;
cs->readisacfifo = &ReadISACfifo;
cs->writeisacfifo = &WriteISACfifo;
cs->irq_func = &elsa_interrupt;
ISACVersion(cs, "Elsa:");
if (HscxVersion(cs, "Elsa:")) {
printk(KERN_WARNING
"Elsa: wrong HSCX versions check IO address\n");
release_io_elsa(cs);
return (0);
}
}
if (cs->subtyp == ELSA_PC) {
val = readitac(cs, ITAC_SYS);
printk(KERN_INFO "Elsa: ITAC version %s\n", ITACVer[val & 7]);
writeitac(cs, ITAC_ISEN, 0);
writeitac(cs, ITAC_RFIE, 0);
writeitac(cs, ITAC_XFIE, 0);
writeitac(cs, ITAC_SCIE, 0);
writeitac(cs, ITAC_STIE, 0);
}
return (1);
}
int __devinit
setup_elsa(struct IsdnCard *card)
{
int rc;
struct IsdnCardState *cs = card->cs;
char tmp[64];
strcpy(tmp, Elsa_revision);
printk(KERN_INFO "HiSax: Elsa driver Rev. %s\n", HiSax_getrev(tmp));
cs->hw.elsa.ctrl_reg = 0;
cs->hw.elsa.status = 0;
cs->hw.elsa.MFlag = 0;
cs->subtyp = 0;
if (cs->typ == ISDN_CTYPE_ELSA) {
rc = setup_elsa_isa(card);
if (!rc)
return (0);
} else if (cs->typ == ISDN_CTYPE_ELSA_PNP) {
rc = setup_elsa_isapnp(card);
if (!rc)
return (0);
} else if (cs->typ == ISDN_CTYPE_ELSA_PCMCIA)
setup_elsa_pcmcia(card);
else if (cs->typ == ISDN_CTYPE_ELSA_PCI) {
rc = setup_elsa_pci(card);
if (!rc)
return (0);
} else
return (0);
return setup_elsa_common(card);
}
| gpl-2.0 |
daivietpda/M7WLJ-5.0.2 | fs/lockd/host.c | 4741 | 16817 | /*
* linux/fs/lockd/host.c
*
* Management for NLM peer hosts. The nlm_host struct is shared
* between client and server implementation. The only reason to
* do so is to reduce code bloat.
*
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/mutex.h>
#include <linux/sunrpc/svc_xprt.h>
#include <net/ipv6.h>
#define NLMDBG_FACILITY NLMDBG_HOSTCACHE
#define NLM_HOST_NRHASH 32
#define NLM_HOST_REBIND (60 * HZ)
#define NLM_HOST_EXPIRE (300 * HZ)
#define NLM_HOST_COLLECT (120 * HZ)
static struct hlist_head nlm_server_hosts[NLM_HOST_NRHASH];
static struct hlist_head nlm_client_hosts[NLM_HOST_NRHASH];
#define for_each_host(host, pos, chain, table) \
for ((chain) = (table); \
(chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
hlist_for_each_entry((host), (pos), (chain), h_hash)
#define for_each_host_safe(host, pos, next, chain, table) \
for ((chain) = (table); \
(chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
hlist_for_each_entry_safe((host), (pos), (next), \
(chain), h_hash)
static unsigned long next_gc;
static unsigned long nrhosts;
static DEFINE_MUTEX(nlm_host_mutex);
static void nlm_gc_hosts(void);
struct nlm_lookup_host_info {
const int server; /* search for server|client */
const struct sockaddr *sap; /* address to search for */
const size_t salen; /* its length */
const unsigned short protocol; /* transport to search for*/
const u32 version; /* NLM version to search for */
const char *hostname; /* remote's hostname */
const size_t hostname_len; /* its length */
const int noresvport; /* use non-priv port */
struct net *net; /* network namespace to bind */
};
/*
* Hash function must work well on big- and little-endian platforms
*/
static unsigned int __nlm_hash32(const __be32 n)
{
unsigned int hash = (__force u32)n ^ ((__force u32)n >> 16);
return hash ^ (hash >> 8);
}
static unsigned int __nlm_hash_addr4(const struct sockaddr *sap)
{
const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
return __nlm_hash32(sin->sin_addr.s_addr);
}
static unsigned int __nlm_hash_addr6(const struct sockaddr *sap)
{
const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
const struct in6_addr addr = sin6->sin6_addr;
return __nlm_hash32(addr.s6_addr32[0]) ^
__nlm_hash32(addr.s6_addr32[1]) ^
__nlm_hash32(addr.s6_addr32[2]) ^
__nlm_hash32(addr.s6_addr32[3]);
}
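/*
* Fold the peer address into a hash-chain index: NLM_HOST_NRHASH is a
* power of two, so masking with (NLM_HOST_NRHASH - 1) selects the low
* bits of the folded 32-bit hash. Unknown address families all land in
* bucket 0.
*/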
static unsigned int nlm_hash_address(const struct sockaddr *sap)
{
unsigned int hash;
switch (sap->sa_family) {
case AF_INET:
hash = __nlm_hash_addr4(sap);
break;
case AF_INET6:
hash = __nlm_hash_addr6(sap);
break;
default:
hash = 0;
}
return hash & (NLM_HOST_NRHASH - 1);
}
/*
* Allocate and initialize an nlm_host. Common to both client and server.
*/
static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
struct nsm_handle *nsm)
{
struct nlm_host *host = NULL;
unsigned long now = jiffies;
if (nsm != NULL)
atomic_inc(&nsm->sm_count);
else {
host = NULL;
nsm = nsm_get_handle(ni->sap, ni->salen,
ni->hostname, ni->hostname_len);
if (unlikely(nsm == NULL)) {
dprintk("lockd: %s failed; no nsm handle\n",
__func__);
goto out;
}
}
host = kmalloc(sizeof(*host), GFP_KERNEL);
if (unlikely(host == NULL)) {
dprintk("lockd: %s failed; no memory\n", __func__);
nsm_release(nsm);
goto out;
}
memcpy(nlm_addr(host), ni->sap, ni->salen);
host->h_addrlen = ni->salen;
rpc_set_port(nlm_addr(host), 0);
host->h_srcaddrlen = 0;
host->h_rpcclnt = NULL;
host->h_name = nsm->sm_name;
host->h_version = ni->version;
host->h_proto = ni->protocol;
host->h_reclaiming = 0;
host->h_server = ni->server;
host->h_noresvport = ni->noresvport;
host->h_inuse = 0;
init_waitqueue_head(&host->h_gracewait);
init_rwsem(&host->h_rwsem);
host->h_state = 0;
host->h_nsmstate = 0;
host->h_pidcount = 0;
atomic_set(&host->h_count, 1);
mutex_init(&host->h_mutex);
host->h_nextrebind = now + NLM_HOST_REBIND;
host->h_expires = now + NLM_HOST_EXPIRE;
INIT_LIST_HEAD(&host->h_lockowners);
spin_lock_init(&host->h_lock);
INIT_LIST_HEAD(&host->h_granted);
INIT_LIST_HEAD(&host->h_reclaim);
host->h_nsmhandle = nsm;
host->h_addrbuf = nsm->sm_addrbuf;
host->net = ni->net;
out:
return host;
}
/*
* Destroy an nlm_host and free associated resources
*
* Caller must hold nlm_host_mutex.
*/
static void nlm_destroy_host_locked(struct nlm_host *host)
{
struct rpc_clnt *clnt;
dprintk("lockd: destroy host %s\n", host->h_name);
BUG_ON(!list_empty(&host->h_lockowners));
BUG_ON(atomic_read(&host->h_count));
hlist_del_init(&host->h_hash);
nsm_unmonitor(host);
nsm_release(host->h_nsmhandle);
clnt = host->h_rpcclnt;
if (clnt != NULL)
rpc_shutdown_client(clnt);
kfree(host);
nrhosts--;
}
/**
* nlmclnt_lookup_host - Find an NLM host handle matching a remote server
* @sap: network address of server
* @salen: length of server address
* @protocol: transport protocol to use
* @version: NLM protocol version
* @hostname: '\0'-terminated hostname of server
* @noresvport: 1 if non-privileged port should be used
*
* Returns an nlm_host structure that matches the passed-in
* [server address, transport protocol, NLM version, server hostname].
* If one doesn't already exist in the host cache, a new handle is
* created and returned.
*/
struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
const size_t salen,
const unsigned short protocol,
const u32 version,
const char *hostname,
int noresvport,
struct net *net)
{
struct nlm_lookup_host_info ni = {
.server = 0,
.sap = sap,
.salen = salen,
.protocol = protocol,
.version = version,
.hostname = hostname,
.hostname_len = strlen(hostname),
.noresvport = noresvport,
.net = net,
};
struct hlist_head *chain;
struct hlist_node *pos;
struct nlm_host *host;
struct nsm_handle *nsm = NULL;
dprintk("lockd: %s(host='%s', vers=%u, proto=%s)\n", __func__,
(hostname ? hostname : "<none>"), version,
(protocol == IPPROTO_UDP ? "udp" : "tcp"));
mutex_lock(&nlm_host_mutex);
chain = &nlm_client_hosts[nlm_hash_address(sap)];
hlist_for_each_entry(host, pos, chain, h_hash) {
if (host->net != net)
continue;
if (!rpc_cmp_addr(nlm_addr(host), sap))
continue;
/* Same address. Share an NSM handle if we already have one */
if (nsm == NULL)
nsm = host->h_nsmhandle;
if (host->h_proto != protocol)
continue;
if (host->h_version != version)
continue;
nlm_get_host(host);
dprintk("lockd: %s found host %s (%s)\n", __func__,
host->h_name, host->h_addrbuf);
goto out;
}
host = nlm_alloc_host(&ni, nsm);
if (unlikely(host == NULL))
goto out;
hlist_add_head(&host->h_hash, chain);
nrhosts++;
dprintk("lockd: %s created host %s (%s)\n", __func__,
host->h_name, host->h_addrbuf);
out:
mutex_unlock(&nlm_host_mutex);
return host;
}
/**
* nlmclnt_release_host - release client nlm_host
* @host: nlm_host to release
*
*/
void nlmclnt_release_host(struct nlm_host *host)
{
if (host == NULL)
return;
dprintk("lockd: release client host %s\n", host->h_name);
BUG_ON(atomic_read(&host->h_count) < 0);
BUG_ON(host->h_server);
if (atomic_dec_and_test(&host->h_count)) {
BUG_ON(!list_empty(&host->h_lockowners));
BUG_ON(!list_empty(&host->h_granted));
BUG_ON(!list_empty(&host->h_reclaim));
mutex_lock(&nlm_host_mutex);
nlm_destroy_host_locked(host);
mutex_unlock(&nlm_host_mutex);
}
}
/**
* nlmsvc_lookup_host - Find an NLM host handle matching a remote client
* @rqstp: incoming NLM request
* @hostname: name of client host
* @hostname_len: length of client hostname
*
* Returns an nlm_host structure that matches the [client address,
* transport protocol, NLM version, client hostname] of the passed-in
* NLM request. If one doesn't already exist in the host cache, a
* new handle is created and returned.
*
* Before possibly creating a new nlm_host, construct a sockaddr
* for a specific source address in case the local system has
* multiple network addresses. The family of the address in
* rq_daddr is guaranteed to be the same as the family of the
* address in rq_addr, so it's safe to use the same family for
* the source address.
*/
struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
const char *hostname,
const size_t hostname_len)
{
struct hlist_head *chain;
struct hlist_node *pos;
struct nlm_host *host = NULL;
struct nsm_handle *nsm = NULL;
struct sockaddr *src_sap = svc_daddr(rqstp);
size_t src_len = rqstp->rq_daddrlen;
struct net *net = rqstp->rq_xprt->xpt_net;
struct nlm_lookup_host_info ni = {
.server = 1,
.sap = svc_addr(rqstp),
.salen = rqstp->rq_addrlen,
.protocol = rqstp->rq_prot,
.version = rqstp->rq_vers,
.hostname = hostname,
.hostname_len = hostname_len,
.net = net,
};
dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__,
(int)hostname_len, hostname, rqstp->rq_vers,
(rqstp->rq_prot == IPPROTO_UDP ? "udp" : "tcp"));
mutex_lock(&nlm_host_mutex);
if (time_after_eq(jiffies, next_gc))
nlm_gc_hosts();
chain = &nlm_server_hosts[nlm_hash_address(ni.sap)];
hlist_for_each_entry(host, pos, chain, h_hash) {
if (host->net != net)
continue;
if (!rpc_cmp_addr(nlm_addr(host), ni.sap))
continue;
/* Same address. Share an NSM handle if we already have one */
if (nsm == NULL)
nsm = host->h_nsmhandle;
if (host->h_proto != ni.protocol)
continue;
if (host->h_version != ni.version)
continue;
if (!rpc_cmp_addr(nlm_srcaddr(host), src_sap))
continue;
/* Move to head of hash chain. */
hlist_del(&host->h_hash);
hlist_add_head(&host->h_hash, chain);
nlm_get_host(host);
dprintk("lockd: %s found host %s (%s)\n",
__func__, host->h_name, host->h_addrbuf);
goto out;
}
host = nlm_alloc_host(&ni, nsm);
if (unlikely(host == NULL))
goto out;
memcpy(nlm_srcaddr(host), src_sap, src_len);
host->h_srcaddrlen = src_len;
hlist_add_head(&host->h_hash, chain);
nrhosts++;
dprintk("lockd: %s created host %s (%s)\n",
__func__, host->h_name, host->h_addrbuf);
out:
mutex_unlock(&nlm_host_mutex);
return host;
}
/**
* nlmsvc_release_host - release server nlm_host
* @host: nlm_host to release
*
* The host is destroyed later in nlm_gc_hosts().
*/
void nlmsvc_release_host(struct nlm_host *host)
{
if (host == NULL)
return;
dprintk("lockd: release server host %s\n", host->h_name);
BUG_ON(atomic_read(&host->h_count) < 0);
BUG_ON(!host->h_server);
atomic_dec(&host->h_count);
}
/*
* Create the NLM RPC client for an NLM peer
*/
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
struct rpc_clnt *clnt;
dprintk("lockd: nlm_bind_host %s (%s)\n",
host->h_name, host->h_addrbuf);
/* Lock host handle */
mutex_lock(&host->h_mutex);
/* If we've already created an RPC client, check whether
* RPC rebind is required
*/
if ((clnt = host->h_rpcclnt) != NULL) {
if (time_after_eq(jiffies, host->h_nextrebind)) {
rpc_force_rebind(clnt);
host->h_nextrebind = jiffies + NLM_HOST_REBIND;
dprintk("lockd: next rebind in %lu jiffies\n",
host->h_nextrebind - jiffies);
}
} else {
unsigned long increment = nlmsvc_timeout;
struct rpc_timeout timeparms = {
.to_initval = increment,
.to_increment = increment,
.to_maxval = increment * 6UL,
.to_retries = 5U,
};
struct rpc_create_args args = {
.net = host->net,
.protocol = host->h_proto,
.address = nlm_addr(host),
.addrsize = host->h_addrlen,
.timeout = &timeparms,
.servername = host->h_name,
.program = &nlm_program,
.version = host->h_version,
.authflavor = RPC_AUTH_UNIX,
.flags = (RPC_CLNT_CREATE_NOPING |
RPC_CLNT_CREATE_AUTOBIND),
};
/*
* lockd retries server side blocks automatically so we want
* those to be soft RPC calls. Client side calls need to be
* hard RPC tasks.
*/
if (!host->h_server)
args.flags |= RPC_CLNT_CREATE_HARDRTRY;
if (host->h_noresvport)
args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
if (host->h_srcaddrlen)
args.saddress = nlm_srcaddr(host);
clnt = rpc_create(&args);
if (!IS_ERR(clnt))
host->h_rpcclnt = clnt;
else {
printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
clnt = NULL;
}
}
mutex_unlock(&host->h_mutex);
return clnt;
}
/*
* Force a portmap lookup of the remote lockd port
*/
void
nlm_rebind_host(struct nlm_host *host)
{
dprintk("lockd: rebind host %s\n", host->h_name);
if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
rpc_force_rebind(host->h_rpcclnt);
host->h_nextrebind = jiffies + NLM_HOST_REBIND;
}
}
/*
* Increment NLM host count
*/
struct nlm_host * nlm_get_host(struct nlm_host *host)
{
if (host) {
dprintk("lockd: get host %s\n", host->h_name);
atomic_inc(&host->h_count);
host->h_expires = jiffies + NLM_HOST_EXPIRE;
}
return host;
}
static struct nlm_host *next_host_state(struct hlist_head *cache,
struct nsm_handle *nsm,
const struct nlm_reboot *info)
{
struct nlm_host *host;
struct hlist_head *chain;
struct hlist_node *pos;
mutex_lock(&nlm_host_mutex);
for_each_host(host, pos, chain, cache) {
if (host->h_nsmhandle == nsm
&& host->h_nsmstate != info->state) {
host->h_nsmstate = info->state;
host->h_state++;
nlm_get_host(host);
mutex_unlock(&nlm_host_mutex);
return host;
}
}
mutex_unlock(&nlm_host_mutex);
return NULL;
}
/**
* nlm_host_rebooted - Release all resources held by rebooted host
* @info: pointer to decoded results of NLM_SM_NOTIFY call
*
* We were notified that the specified host has rebooted. Release
* all resources held by that peer.
*/
void nlm_host_rebooted(const struct nlm_reboot *info)
{
struct nsm_handle *nsm;
struct nlm_host *host;
nsm = nsm_reboot_lookup(info);
if (unlikely(nsm == NULL))
return;
/* Mark all hosts tied to this NSM state as having rebooted.
* We run the loop repeatedly, because we drop the host table
* lock for this.
* To avoid processing a host several times, we match the nsmstate.
*/
while ((host = next_host_state(nlm_server_hosts, nsm, info)) != NULL) {
nlmsvc_free_host_resources(host);
nlmsvc_release_host(host);
}
while ((host = next_host_state(nlm_client_hosts, nsm, info)) != NULL) {
nlmclnt_recovery(host);
nlmclnt_release_host(host);
}
nsm_release(nsm);
}
void
nlm_shutdown_hosts_net(struct net *net)
{
struct hlist_head *chain;
struct hlist_node *pos;
struct nlm_host *host;
dprintk("lockd: shutting down host module\n");
mutex_lock(&nlm_host_mutex);
/* First, make all hosts eligible for gc */
dprintk("lockd: nuking all hosts...\n");
for_each_host(host, pos, chain, nlm_server_hosts) {
if (net && host->net != net)
continue;
host->h_expires = jiffies - 1;
if (host->h_rpcclnt) {
rpc_shutdown_client(host->h_rpcclnt);
host->h_rpcclnt = NULL;
}
}
/* Then, perform a garbage collection pass */
nlm_gc_hosts();
mutex_unlock(&nlm_host_mutex);
}
/*
* Shut down the hosts module.
* Note that this routine is called only at server shutdown time.
*/
void
nlm_shutdown_hosts(void)
{
struct hlist_head *chain;
struct hlist_node *pos;
struct nlm_host *host;
nlm_shutdown_hosts_net(NULL);
/* complain if any hosts are left */
if (nrhosts != 0) {
printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
dprintk("lockd: %lu hosts left:\n", nrhosts);
for_each_host(host, pos, chain, nlm_server_hosts) {
dprintk(" %s (cnt %d use %d exp %ld net %p)\n",
host->h_name, atomic_read(&host->h_count),
host->h_inuse, host->h_expires, host->net);
}
}
}
/*
* Garbage collect any unused NLM hosts.
* This GC combines reference counting for async operations with
* mark & sweep for resources held by remote clients.
*/
static void
nlm_gc_hosts(void)
{
struct hlist_head *chain;
struct hlist_node *pos, *next;
struct nlm_host *host;
dprintk("lockd: host garbage collection\n");
for_each_host(host, pos, chain, nlm_server_hosts)
host->h_inuse = 0;
/* Mark all hosts that hold locks, blocks or shares */
nlmsvc_mark_resources();
for_each_host_safe(host, pos, next, chain, nlm_server_hosts) {
if (atomic_read(&host->h_count) || host->h_inuse
|| time_before(jiffies, host->h_expires)) {
dprintk("nlm_gc_hosts skipping %s "
"(cnt %d use %d exp %ld)\n",
host->h_name, atomic_read(&host->h_count),
host->h_inuse, host->h_expires);
continue;
}
nlm_destroy_host_locked(host);
}
next_gc = jiffies + NLM_HOST_COLLECT;
}
| gpl-2.0 |
Kernel-Saram/IM-A710K-Nova_Kernel-Project | scripts/dtc/libfdt/fdt_sw.c | 7557 | 7175 | /*
* libfdt - Flat Device Tree manipulation
* Copyright (C) 2006 David Gibson, IBM Corporation.
*
* libfdt is dual licensed: you can use it either under the terms of
* the GPL, or the BSD license, at your option.
*
* a) This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*
* Alternatively,
*
* b) Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "libfdt_env.h"
#include <fdt.h>
#include <libfdt.h>
#include "libfdt_internal.h"
static int _fdt_sw_check_header(void *fdt)
{
if (fdt_magic(fdt) != FDT_SW_MAGIC)
return -FDT_ERR_BADMAGIC;
/* FIXME: should check more details about the header state */
return 0;
}
#define FDT_SW_CHECK_HEADER(fdt) \
{ \
int err; \
if ((err = _fdt_sw_check_header(fdt)) != 0) \
return err; \
}
static void *_fdt_grab_space(void *fdt, int len)
{
int offset = fdt_size_dt_struct(fdt);
int spaceleft;
spaceleft = fdt_totalsize(fdt) - fdt_off_dt_struct(fdt)
- fdt_size_dt_strings(fdt);
if ((offset + len < offset) || (offset + len > spaceleft))
return NULL;
fdt_set_size_dt_struct(fdt, offset + len);
return fdt_offset_ptr_w(fdt, offset, len);
}
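/*
* Sequential-write layout: while a tree is being built the header
* carries FDT_SW_MAGIC, the structure block grows upward from
* off_dt_struct and the string table grows downward from the end of the
* buffer. A typical builder calls, in order: fdt_create(),
* fdt_add_reservemap_entry()/fdt_finish_reservemap(), fdt_begin_node(),
* fdt_property() ... fdt_end_node(), and finally fdt_finish().
*/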
int fdt_create(void *buf, int bufsize)
{
void *fdt = buf;
if (bufsize < sizeof(struct fdt_header))
return -FDT_ERR_NOSPACE;
memset(buf, 0, bufsize);
fdt_set_magic(fdt, FDT_SW_MAGIC);
fdt_set_version(fdt, FDT_LAST_SUPPORTED_VERSION);
fdt_set_last_comp_version(fdt, FDT_FIRST_SUPPORTED_VERSION);
fdt_set_totalsize(fdt, bufsize);
fdt_set_off_mem_rsvmap(fdt, FDT_ALIGN(sizeof(struct fdt_header),
sizeof(struct fdt_reserve_entry)));
fdt_set_off_dt_struct(fdt, fdt_off_mem_rsvmap(fdt));
fdt_set_off_dt_strings(fdt, bufsize);
return 0;
}
int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size)
{
struct fdt_reserve_entry *re;
int offset;
FDT_SW_CHECK_HEADER(fdt);
if (fdt_size_dt_struct(fdt))
return -FDT_ERR_BADSTATE;
offset = fdt_off_dt_struct(fdt);
if ((offset + sizeof(*re)) > fdt_totalsize(fdt))
return -FDT_ERR_NOSPACE;
re = (struct fdt_reserve_entry *)((char *)fdt + offset);
re->address = cpu_to_fdt64(addr);
re->size = cpu_to_fdt64(size);
fdt_set_off_dt_struct(fdt, offset + sizeof(*re));
return 0;
}
int fdt_finish_reservemap(void *fdt)
{
return fdt_add_reservemap_entry(fdt, 0, 0);
}
int fdt_begin_node(void *fdt, const char *name)
{
struct fdt_node_header *nh;
int namelen = strlen(name) + 1;
FDT_SW_CHECK_HEADER(fdt);
nh = _fdt_grab_space(fdt, sizeof(*nh) + FDT_TAGALIGN(namelen));
if (! nh)
return -FDT_ERR_NOSPACE;
nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE);
memcpy(nh->name, name, namelen);
return 0;
}
int fdt_end_node(void *fdt)
{
uint32_t *en;
FDT_SW_CHECK_HEADER(fdt);
en = _fdt_grab_space(fdt, FDT_TAGSIZE);
if (! en)
return -FDT_ERR_NOSPACE;
*en = cpu_to_fdt32(FDT_END_NODE);
return 0;
}
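/*
* During sequential write the string table sits at the very end of
* the buffer and grows downward, so the offsets returned by
* _fdt_find_add_string() below are negative, relative to the end of
* the buffer. fdt_finish() later moves the table to just after the
* structure block and rebases each property's nameoff by adding the
* final string table size.
*/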
static int _fdt_find_add_string(void *fdt, const char *s)
{
char *strtab = (char *)fdt + fdt_totalsize(fdt);
const char *p;
int strtabsize = fdt_size_dt_strings(fdt);
int len = strlen(s) + 1;
int struct_top, offset;
p = _fdt_find_string(strtab - strtabsize, strtabsize, s);
if (p)
return p - strtab;
/* Add it */
offset = -strtabsize - len;
struct_top = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt);
if (fdt_totalsize(fdt) + offset < struct_top)
return 0; /* no more room :( */
memcpy(strtab + offset, s, len);
fdt_set_size_dt_strings(fdt, strtabsize + len);
return offset;
}
int fdt_property(void *fdt, const char *name, const void *val, int len)
{
struct fdt_property *prop;
int nameoff;
FDT_SW_CHECK_HEADER(fdt);
nameoff = _fdt_find_add_string(fdt, name);
if (nameoff == 0)
return -FDT_ERR_NOSPACE;
prop = _fdt_grab_space(fdt, sizeof(*prop) + FDT_TAGALIGN(len));
if (! prop)
return -FDT_ERR_NOSPACE;
prop->tag = cpu_to_fdt32(FDT_PROP);
prop->nameoff = cpu_to_fdt32(nameoff);
prop->len = cpu_to_fdt32(len);
memcpy(prop->data, val, len);
return 0;
}
int fdt_finish(void *fdt)
{
char *p = (char *)fdt;
uint32_t *end;
int oldstroffset, newstroffset;
uint32_t tag;
int offset, nextoffset;
FDT_SW_CHECK_HEADER(fdt);
/* Add terminator */
end = _fdt_grab_space(fdt, sizeof(*end));
if (! end)
return -FDT_ERR_NOSPACE;
*end = cpu_to_fdt32(FDT_END);
/* Relocate the string table */
oldstroffset = fdt_totalsize(fdt) - fdt_size_dt_strings(fdt);
newstroffset = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt);
memmove(p + newstroffset, p + oldstroffset, fdt_size_dt_strings(fdt));
fdt_set_off_dt_strings(fdt, newstroffset);
/* Walk the structure, correcting string offsets */
offset = 0;
while ((tag = fdt_next_tag(fdt, offset, &nextoffset)) != FDT_END) {
if (tag == FDT_PROP) {
struct fdt_property *prop =
fdt_offset_ptr_w(fdt, offset, sizeof(*prop));
int nameoff;
if (! prop)
return -FDT_ERR_BADSTRUCTURE;
nameoff = fdt32_to_cpu(prop->nameoff);
nameoff += fdt_size_dt_strings(fdt);
prop->nameoff = cpu_to_fdt32(nameoff);
}
offset = nextoffset;
}
/* Finally, adjust the header */
fdt_set_totalsize(fdt, newstroffset + fdt_size_dt_strings(fdt));
fdt_set_magic(fdt, FDT_MAGIC);
return 0;
}
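/*
* Minimal usage sketch (illustrative only, not part of the original
* file): the sequential-write API above is driven in this order; the
* buffer size, node name and property contents below are assumptions.
*/
#if 0 /* example only, not built */
static int example_build_fdt(void *buf, int bufsize)
{
int err;
if ((err = fdt_create(buf, bufsize)) != 0)
return err;
if ((err = fdt_finish_reservemap(buf)) != 0)
return err;
if ((err = fdt_begin_node(buf, "")) != 0) /* root node */
return err;
if ((err = fdt_property(buf, "compatible", "example,board",
sizeof("example,board"))) != 0)
return err;
if ((err = fdt_end_node(buf)) != 0)
return err;
return fdt_finish(buf); /* writes FDT_END, relocates strings */
}
#endif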
| gpl-2.0 |
horuscentro/android_kernel_motorola_msm8226 | tools/testing/selftests/breakpoints/breakpoint_test.c | 8325 | 7343 | /*
* Copyright (C) 2011 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
*
* Licensed under the terms of the GNU GPL License version 2
*
* Selftests for breakpoints (and more generally the do_debug() path) in x86.
*/
#include <sys/ptrace.h>
#include <unistd.h>
#include <stddef.h>
#include <sys/user.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
/* Breakpoint access modes */
enum {
BP_X = 1,
BP_RW = 2,
BP_W = 4,
};
static pid_t child_pid;
/*
* Ensures the child and parent are always "talking" about
* the same test sequence. (ie: that we haven't forgotten
* to call check_trapped() somewhere).
*/
static int nr_tests;
static void set_breakpoint_addr(void *addr, int n)
{
int ret;
ret = ptrace(PTRACE_POKEUSER, child_pid,
offsetof(struct user, u_debugreg[n]), addr);
if (ret) {
perror("Can't set breakpoint addr\n");
exit(-1);
}
}
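/*
* dr7 layout on x86: bits 0-7 hold the local/global enable pairs for
* breakpoints 0-3 (local at bit 2n, global at bit 2n+1), bits 8 and 9
* are the LE/GE flags, and each breakpoint owns a 4-bit type/length
* field starting at bit 16 + 4n, which is what the shifts in
* toggle_breakpoint() below compute.
*/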
static void toggle_breakpoint(int n, int type, int len,
int local, int global, int set)
{
int ret;
int xtype, xlen;
unsigned long vdr7, dr7;
switch (type) {
case BP_X:
xtype = 0;
break;
case BP_W:
xtype = 1;
break;
case BP_RW:
xtype = 3;
break;
}
switch (len) {
case 1:
xlen = 0;
break;
case 2:
xlen = 4;
break;
case 4:
xlen = 0xc;
break;
case 8:
xlen = 8;
break;
}
dr7 = ptrace(PTRACE_PEEKUSER, child_pid,
offsetof(struct user, u_debugreg[7]), 0);
vdr7 = (xlen | xtype) << 16;
vdr7 <<= 4 * n;
if (local) {
vdr7 |= 1 << (2 * n);
vdr7 |= 1 << 8;
}
if (global) {
vdr7 |= 2 << (2 * n);
vdr7 |= 1 << 9;
}
if (set)
dr7 |= vdr7;
else
dr7 &= ~vdr7;
ret = ptrace(PTRACE_POKEUSER, child_pid,
offsetof(struct user, u_debugreg[7]), dr7);
if (ret) {
perror("Can't set dr7");
exit(-1);
}
}
/* Dummy variables to test read/write accesses */
static unsigned long long dummy_var[4];
/* Dummy functions to test execution accesses */
static void dummy_func(void) { }
static void dummy_func1(void) { }
static void dummy_func2(void) { }
static void dummy_func3(void) { }
static void (*dummy_funcs[])(void) = {
dummy_func,
dummy_func1,
dummy_func2,
dummy_func3,
};
static int trapped;
static void check_trapped(void)
{
/*
* If we haven't trapped, wake up the parent
* so that it notices the failure.
*/
if (!trapped)
kill(getpid(), SIGUSR1);
trapped = 0;
nr_tests++;
}
static void write_var(int len)
{
char *pcval; short *psval; int *pival; long long *plval;
int i;
for (i = 0; i < 4; i++) {
switch (len) {
case 1:
pcval = (char *)&dummy_var[i];
*pcval = 0xff;
break;
case 2:
psval = (short *)&dummy_var[i];
*psval = 0xffff;
break;
case 4:
pival = (int *)&dummy_var[i];
*pival = 0xffffffff;
break;
case 8:
plval = (long long *)&dummy_var[i];
*plval = 0xffffffffffffffffLL;
break;
}
check_trapped();
}
}
static void read_var(int len)
{
char cval; short sval; int ival; long long lval;
int i;
for (i = 0; i < 4; i++) {
switch (len) {
case 1:
cval = *(char *)&dummy_var[i];
break;
case 2:
sval = *(short *)&dummy_var[i];
break;
case 4:
ival = *(int *)&dummy_var[i];
break;
case 8:
lval = *(long long *)&dummy_var[i];
break;
}
check_trapped();
}
}
/*
* Do the r/w/x accesses to trigger the breakpoints, and run
* the usual traps.
*/
static void trigger_tests(void)
{
int len, local, global, i;
char val;
int ret;
ret = ptrace(PTRACE_TRACEME, 0, NULL, 0);
if (ret) {
perror("Can't be traced?\n");
return;
}
/* Wake up the parent so that it sets up the first test */
kill(getpid(), SIGUSR1);
/* Test instruction breakpoints */
for (local = 0; local < 2; local++) {
for (global = 0; global < 2; global++) {
if (!local && !global)
continue;
for (i = 0; i < 4; i++) {
dummy_funcs[i]();
check_trapped();
}
}
}
/* Test write watchpoints */
for (len = 1; len <= sizeof(long); len <<= 1) {
for (local = 0; local < 2; local++) {
for (global = 0; global < 2; global++) {
if (!local && !global)
continue;
write_var(len);
}
}
}
/* Test read/write watchpoints (on read accesses) */
for (len = 1; len <= sizeof(long); len <<= 1) {
for (local = 0; local < 2; local++) {
for (global = 0; global < 2; global++) {
if (!local && !global)
continue;
read_var(len);
}
}
}
/* Icebp trap */
asm(".byte 0xf1\n");
check_trapped();
/* Int 3 trap */
asm("int $3\n");
check_trapped();
kill(getpid(), SIGUSR1);
}
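/*
* Parent/child protocol: the child calls PTRACE_TRACEME and raises
* SIGUSR1 to stop itself; the parent programs the debug registers,
* resumes it with PTRACE_CONT and waits for the SIGTRAP raised by
* each access above. When a breakpoint fails to fire, check_trapped()
* raises SIGUSR1 instead, which check_success() below detects through
* WSTOPSIG(); the PTRACE_PEEKDATA on nr_tests additionally verifies
* that parent and child agree on the test sequence.
*/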
static void check_success(const char *msg)
{
const char *msg2;
int child_nr_tests;
int status;
/* Wait for the child to SIGTRAP */
wait(&status);
msg2 = "Failed";
if (WSTOPSIG(status) == SIGTRAP) {
child_nr_tests = ptrace(PTRACE_PEEKDATA, child_pid,
&nr_tests, 0);
if (child_nr_tests == nr_tests)
msg2 = "Ok";
if (ptrace(PTRACE_POKEDATA, child_pid, &trapped, 1)) {
perror("Can't poke\n");
exit(-1);
}
}
nr_tests++;
printf("%s [%s]\n", msg, msg2);
}
static void launch_instruction_breakpoints(char *buf, int local, int global)
{
int i;
for (i = 0; i < 4; i++) {
set_breakpoint_addr(dummy_funcs[i], i);
toggle_breakpoint(i, BP_X, 1, local, global, 1);
ptrace(PTRACE_CONT, child_pid, NULL, 0);
sprintf(buf, "Test breakpoint %d with local: %d global: %d",
i, local, global);
check_success(buf);
toggle_breakpoint(i, BP_X, 1, local, global, 0);
}
}
static void launch_watchpoints(char *buf, int mode, int len,
int local, int global)
{
const char *mode_str;
int i;
if (mode == BP_W)
mode_str = "write";
else
mode_str = "read";
for (i = 0; i < 4; i++) {
set_breakpoint_addr(&dummy_var[i], i);
toggle_breakpoint(i, mode, len, local, global, 1);
ptrace(PTRACE_CONT, child_pid, NULL, 0);
sprintf(buf, "Test %s watchpoint %d with len: %d local: "
"%d global: %d", mode_str, i, len, local, global);
check_success(buf);
toggle_breakpoint(i, mode, len, local, global, 0);
}
}
/* Set the breakpoints and check the child successfully trigger them */
static void launch_tests(void)
{
char buf[1024];
int len, local, global, i;
/* Instruction breakpoints */
for (local = 0; local < 2; local++) {
for (global = 0; global < 2; global++) {
if (!local && !global)
continue;
launch_instruction_breakpoints(buf, local, global);
}
}
/* Write watchpoint */
for (len = 1; len <= sizeof(long); len <<= 1) {
for (local = 0; local < 2; local++) {
for (global = 0; global < 2; global++) {
if (!local && !global)
continue;
launch_watchpoints(buf, BP_W, len,
local, global);
}
}
}
/* Read-Write watchpoint */
for (len = 1; len <= sizeof(long); len <<= 1) {
for (local = 0; local < 2; local++) {
for (global = 0; global < 2; global++) {
if (!local && !global)
continue;
launch_watchpoints(buf, BP_RW, len,
local, global);
}
}
}
/* Icebp traps */
ptrace(PTRACE_CONT, child_pid, NULL, 0);
check_success("Test icebp");
/* Int 3 traps */
ptrace(PTRACE_CONT, child_pid, NULL, 0);
check_success("Test int 3 trap");
ptrace(PTRACE_CONT, child_pid, NULL, 0);
}
int main(int argc, char **argv)
{
pid_t pid;
int ret;
pid = fork();
if (!pid) {
trigger_tests();
return 0;
}
child_pid = pid;
wait(NULL);
launch_tests();
wait(NULL);
return 0;
}
| gpl-2.0 |
kbc-developers/android_kernel_samsung_d2dcm | drivers/staging/comedi/range.c | 9093 | 4431 | /*
module/range.c
comedi routines for voltage ranges
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/uaccess.h>
#include "comedidev.h"
#include "internal.h"
const struct comedi_lrange range_bipolar10 = { 1, {BIP_RANGE(10)} };
EXPORT_SYMBOL(range_bipolar10);
const struct comedi_lrange range_bipolar5 = { 1, {BIP_RANGE(5)} };
EXPORT_SYMBOL(range_bipolar5);
const struct comedi_lrange range_bipolar2_5 = { 1, {BIP_RANGE(2.5)} };
EXPORT_SYMBOL(range_bipolar2_5);
const struct comedi_lrange range_unipolar10 = { 1, {UNI_RANGE(10)} };
EXPORT_SYMBOL(range_unipolar10);
const struct comedi_lrange range_unipolar5 = { 1, {UNI_RANGE(5)} };
EXPORT_SYMBOL(range_unipolar5);
const struct comedi_lrange range_unknown = { 1, {{0, 1000000, UNIT_none} } };
EXPORT_SYMBOL(range_unknown);
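/*
* Illustrative sketch (not part of the original file): a driver that
* needs its own selection of ranges defines a table in the same form
* as the exported singletons above; the name and entries below are
* assumptions.
*/
#if 0 /* example only, not built */
static const struct comedi_lrange example_ai_ranges = {
4, {
BIP_RANGE(10), /* +/-10 V */
BIP_RANGE(5), /* +/-5 V */
UNI_RANGE(10), /* 0..10 V */
UNI_RANGE(5) /* 0..5 V */
}
};
#endif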
/*
COMEDI_RANGEINFO
range information ioctl
arg:
pointer to rangeinfo structure
reads:
range info structure
writes:
n struct comedi_krange structures to rangeinfo->range_ptr
*/
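/*
* The range_type word packs the subdevice index into bits 27-24 and
* the channel number into bits 23-16, as decoded below; the range
* table length lives in the low bits extracted by RANGE_LENGTH(),
* which is why a length mismatch is rejected as -EINVAL.
*/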
int do_rangeinfo_ioctl(struct comedi_device *dev,
struct comedi_rangeinfo __user *arg)
{
struct comedi_rangeinfo it;
int subd, chan;
const struct comedi_lrange *lr;
struct comedi_subdevice *s;
if (copy_from_user(&it, arg, sizeof(struct comedi_rangeinfo)))
return -EFAULT;
subd = (it.range_type >> 24) & 0xf;
chan = (it.range_type >> 16) & 0xff;
if (!dev->attached)
return -EINVAL;
if (subd >= dev->n_subdevices)
return -EINVAL;
s = dev->subdevices + subd;
if (s->range_table) {
lr = s->range_table;
} else if (s->range_table_list) {
if (chan >= s->n_chan)
return -EINVAL;
lr = s->range_table_list[chan];
} else {
return -EINVAL;
}
if (RANGE_LENGTH(it.range_type) != lr->length) {
DPRINTK("wrong length %d should be %d (0x%08x)\n",
RANGE_LENGTH(it.range_type), lr->length, it.range_type);
return -EINVAL;
}
if (copy_to_user(it.range_ptr, lr->range,
sizeof(struct comedi_krange) * lr->length))
return -EFAULT;
return 0;
}
static int aref_invalid(struct comedi_subdevice *s, unsigned int chanspec)
{
unsigned int aref;
/* disable reporting invalid arefs... maybe someday */
return 0;
aref = CR_AREF(chanspec);
switch (aref) {
case AREF_DIFF:
if (s->subdev_flags & SDF_DIFF)
return 0;
break;
case AREF_COMMON:
if (s->subdev_flags & SDF_COMMON)
return 0;
break;
case AREF_GROUND:
if (s->subdev_flags & SDF_GROUND)
return 0;
break;
case AREF_OTHER:
if (s->subdev_flags & SDF_OTHER)
return 0;
break;
default:
break;
}
DPRINTK("subdevice does not support aref %i", aref);
return 1;
}
/*
This function checks each element in a channel/gain list to make
sure it is valid.
*/
int comedi_check_chanlist(struct comedi_subdevice *s, int n,
unsigned int *chanlist)
{
int i;
int chan;
if (s->range_table) {
for (i = 0; i < n; i++)
if (CR_CHAN(chanlist[i]) >= s->n_chan ||
CR_RANGE(chanlist[i]) >= s->range_table->length
|| aref_invalid(s, chanlist[i])) {
printk(KERN_ERR "bad chanlist[%d]=0x%08x "
"in_chan=%d range length=%d\n", i,
chanlist[i], s->n_chan,
s->range_table->length);
return -EINVAL;
}
} else if (s->range_table_list) {
for (i = 0; i < n; i++) {
chan = CR_CHAN(chanlist[i]);
if (chan >= s->n_chan ||
CR_RANGE(chanlist[i]) >=
s->range_table_list[chan]->length
|| aref_invalid(s, chanlist[i])) {
printk(KERN_ERR "bad chanlist[%d]=0x%08x\n",
i, chanlist[i]);
return -EINVAL;
}
}
} else {
printk(KERN_ERR "comedi: (bug) no range type list!\n");
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(comedi_check_chanlist);
| gpl-2.0 |
grzmot22/android_kernel_htc_protou | drivers/staging/rtl8192u/r8192U_dm.c | 9093 | 130478 | /*++
Copyright (c) Realtek Semiconductor Corp. All rights reserved.
Module Name:
r8192U_dm.c
Abstract:
HW dynamic mechanism.
Major Change History:
When Who What
---------- --------------- -------------------------------
2008-05-14 amy create version 0 porting from windows code.
--*/
#include "r8192U.h"
#include "r8192U_dm.h"
#include "r8192U_hw.h"
#include "r819xU_phy.h"
#include "r819xU_phyreg.h"
#include "r8190_rtl8256.h"
#include "r819xU_cmdpkt.h"
/*---------------------------Define Local Constant---------------------------*/
//
// Indicate different AP vendor for IOT issue.
//
static u32 edca_setting_DL[HT_IOT_PEER_MAX] =
{ 0x5e4322, 0x5e4322, 0x5e4322, 0x604322, 0xa44f, 0x5ea44f};
static u32 edca_setting_UL[HT_IOT_PEER_MAX] =
{ 0x5e4322, 0xa44f, 0x5e4322, 0x604322, 0x5ea44f, 0x5ea44f};
#define RTK_UL_EDCA 0xa44f
#define RTK_DL_EDCA 0x5e4322
/*---------------------------Define Local Constant---------------------------*/
/*------------------------Define global variable-----------------------------*/
// Debug variable ?
dig_t dm_digtable;
// Store current software write register content for MAC PHY.
u8 dm_shadow[16][256] = {{0}};
// For Dynamic Rx Path Selection by Signal Strength
DRxPathSel DM_RxPathSelTable;
/*------------------------Define global variable-----------------------------*/
/*------------------------Define local variable------------------------------*/
/*------------------------Define local variable------------------------------*/
/*--------------------Define export function prototype-----------------------*/
extern void init_hal_dm(struct net_device *dev);
extern void deinit_hal_dm(struct net_device *dev);
extern void hal_dm_watchdog(struct net_device *dev);
extern void init_rate_adaptive(struct net_device *dev);
extern void dm_txpower_trackingcallback(struct work_struct *work);
extern void dm_cck_txpower_adjust(struct net_device *dev,bool binch14);
extern void dm_restore_dynamic_mechanism_state(struct net_device *dev);
extern void dm_backup_dynamic_mechanism_state(struct net_device *dev);
extern void dm_change_dynamic_initgain_thresh(struct net_device *dev,
u32 dm_type,
u32 dm_value);
extern void DM_ChangeFsyncSetting(struct net_device *dev,
s32 DM_Type,
s32 DM_Value);
extern void dm_force_tx_fw_info(struct net_device *dev,
u32 force_type,
u32 force_value);
extern void dm_init_edca_turbo(struct net_device *dev);
extern void dm_rf_operation_test_callback(unsigned long data);
extern void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
extern void dm_fsync_timer_callback(unsigned long data);
extern void dm_check_fsync(struct net_device *dev);
extern void dm_shadow_init(struct net_device *dev);
/*--------------------Define export function prototype-----------------------*/
/*---------------------Define local function prototype-----------------------*/
// DM --> Rate Adaptive
static void dm_check_rate_adaptive(struct net_device *dev);
// DM --> Bandwidth switch
static void dm_init_bandwidth_autoswitch(struct net_device *dev);
static void dm_bandwidth_autoswitch( struct net_device *dev);
// DM --> TX power control
//static void dm_initialize_txpower_tracking(struct net_device *dev);
static void dm_check_txpower_tracking(struct net_device *dev);
//static void dm_txpower_reset_recovery(struct net_device *dev);
// DM --> BB init gain restore
#ifndef RTL8192U
static void dm_bb_initialgain_restore(struct net_device *dev);
// DM --> BB init gain backup
static void dm_bb_initialgain_backup(struct net_device *dev);
#endif
// DM --> Dynamic Init Gain by RSSI
static void dm_dig_init(struct net_device *dev);
static void dm_ctrl_initgain_byrssi(struct net_device *dev);
static void dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev);
static void dm_ctrl_initgain_byrssi_by_driverrssi( struct net_device *dev);
static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(struct net_device *dev);
static void dm_initial_gain(struct net_device *dev);
static void dm_pd_th(struct net_device *dev);
static void dm_cs_ratio(struct net_device *dev);
static void dm_init_ctstoself(struct net_device *dev);
// DM --> EDCA turbo mode control
static void dm_check_edca_turbo(struct net_device *dev);
// DM --> HW RF control
static void dm_check_rfctrl_gpio(struct net_device *dev);
#ifndef RTL8190P
//static void dm_gpio_change_rf(struct net_device *dev);
#endif
// DM --> Check PBC
static void dm_check_pbc_gpio(struct net_device *dev);
// DM --> Check current RX RF path state
static void dm_check_rx_path_selection(struct net_device *dev);
static void dm_init_rxpath_selection(struct net_device *dev);
static void dm_rxpath_sel_byrssi(struct net_device *dev);
// DM --> Fsync for broadcom ap
static void dm_init_fsync(struct net_device *dev);
static void dm_deInit_fsync(struct net_device *dev);
//Added by vivi, 20080522
static void dm_check_txrateandretrycount(struct net_device *dev);
/*---------------------Define local function prototype-----------------------*/
/*---------------------Define of Tx Power Control For Near/Far Range --------*/ //Add by Jacken 2008/02/18
static void dm_init_dynamic_txpower(struct net_device *dev);
static void dm_dynamic_txpower(struct net_device *dev);
// DM --> For rate adaptive and DIG, we must send RSSI to firmware
static void dm_send_rssi_tofw(struct net_device *dev);
static void dm_ctstoself(struct net_device *dev);
/*---------------------------Define function prototype------------------------*/
//================================================================================
// HW Dynamic mechanism interface.
//================================================================================
//
// Description:
// Prepare SW resource for HW dynamic mechanism.
//
// Assumption:
// This function is only invoked once, at driver initialization.
//
//
extern void
init_hal_dm(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
// Undecorated Smoothed Signal Strength; it can be utilized by the dynamic mechanism.
priv->undecorated_smoothed_pwdb = -1;
//Initial TX Power Control for near/far range , add by amy 2008/05/15, porting from windows code.
dm_init_dynamic_txpower(dev);
init_rate_adaptive(dev);
//dm_initialize_txpower_tracking(dev);
dm_dig_init(dev);
dm_init_edca_turbo(dev);
dm_init_bandwidth_autoswitch(dev);
dm_init_fsync(dev);
dm_init_rxpath_selection(dev);
dm_init_ctstoself(dev);
} // InitHalDm
extern void deinit_hal_dm(struct net_device *dev)
{
dm_deInit_fsync(dev);
}
#ifdef USB_RX_AGGREGATION_SUPPORT
void dm_CheckRxAggregation(struct net_device *dev) {
struct r8192_priv *priv = ieee80211_priv((struct net_device *)dev);
PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
static unsigned long lastTxOkCnt = 0;
static unsigned long lastRxOkCnt = 0;
unsigned long curTxOkCnt = 0;
unsigned long curRxOkCnt = 0;
/*
if (pHalData->bForcedUsbRxAggr) {
if (pHalData->ForcedUsbRxAggrInfo == 0) {
if (pHalData->bCurrentRxAggrEnable) {
Adapter->HalFunc.HalUsbRxAggrHandler(Adapter, FALSE);
}
} else {
if (!pHalData->bCurrentRxAggrEnable || (pHalData->ForcedUsbRxAggrInfo != pHalData->LastUsbRxAggrInfoSetting)) {
Adapter->HalFunc.HalUsbRxAggrHandler(Adapter, TRUE);
}
}
return;
}
*/
curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
if((curTxOkCnt + curRxOkCnt) < 15000000) {
return;
}
if(curTxOkCnt > 4*curRxOkCnt) {
if (priv->bCurrentRxAggrEnable) {
write_nic_dword(dev, 0x1a8, 0);
priv->bCurrentRxAggrEnable = false;
}
}else{
if (!priv->bCurrentRxAggrEnable && !pHTInfo->bCurrentRT2RTAggregation) {
u32 ulValue;
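/*
* Pack the four aggregation parameters into one dword for register
* 0x1a8: enable flag in bits 31-24, page number in bits 23-16,
* packet number in bits 15-8 and timeout in bits 7-0.
*/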
ulValue = (pHTInfo->UsbRxFwAggrEn<<24) | (pHTInfo->UsbRxFwAggrPageNum<<16) |
(pHTInfo->UsbRxFwAggrPacketNum<<8) | (pHTInfo->UsbRxFwAggrTimeout);
/*
* If usb rx firmware aggregation is enabled,
* when anyone of three threshold conditions above is reached,
* firmware will send aggregated packet to driver.
*/
write_nic_dword(dev, 0x1a8, ulValue);
priv->bCurrentRxAggrEnable = true;
}
}
lastTxOkCnt = priv->stats.txbytesunicast;
lastRxOkCnt = priv->stats.rxbytesunicast;
} // dm_CheckRxAggregation
#endif
extern void hal_dm_watchdog(struct net_device *dev)
{
//struct r8192_priv *priv = ieee80211_priv(dev);
//static u8 previous_bssid[6] ={0};
/*Add by amy 2008/05/15 ,porting from windows code.*/
dm_check_rate_adaptive(dev);
dm_dynamic_txpower(dev);
dm_check_txrateandretrycount(dev);
dm_check_txpower_tracking(dev);
dm_ctrl_initgain_byrssi(dev);
dm_check_edca_turbo(dev);
dm_bandwidth_autoswitch(dev);
dm_check_rfctrl_gpio(dev);
dm_check_rx_path_selection(dev);
dm_check_fsync(dev);
// Add by amy 2008-05-15 porting from windows code.
dm_check_pbc_gpio(dev);
dm_send_rssi_tofw(dev);
dm_ctstoself(dev);
#ifdef USB_RX_AGGREGATION_SUPPORT
dm_CheckRxAggregation(dev);
#endif
} //HalDmWatchDog
/*
* Decide Rate Adaptive Set according to distance (signal strength)
* 01/11/2008 MHC Modify input arguments and RATR table level.
* 01/16/2008 MHC RF_Type is assigned in ReadAdapterInfo(). We must call
* the function after making sure RF_Type.
*/
extern void init_rate_adaptive(struct net_device * dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
prate_adaptive pra = (prate_adaptive)&priv->rate_adaptive;
pra->ratr_state = DM_RATR_STA_MAX;
pra->high2low_rssi_thresh_for_ra = RateAdaptiveTH_High;
pra->low2high_rssi_thresh_for_ra20M = RateAdaptiveTH_Low_20M+5;
pra->low2high_rssi_thresh_for_ra40M = RateAdaptiveTH_Low_40M+5;
pra->high_rssi_thresh_for_ra = RateAdaptiveTH_High+5;
pra->low_rssi_thresh_for_ra20M = RateAdaptiveTH_Low_20M;
pra->low_rssi_thresh_for_ra40M = RateAdaptiveTH_Low_40M;
if(priv->CustomerID == RT_CID_819x_Netcore)
pra->ping_rssi_enable = 1;
else
pra->ping_rssi_enable = 0;
pra->ping_rssi_thresh_for_ra = 15;
if (priv->rf_type == RF_2T4R)
{
// 07/10/08 MH Modify for RA smooth scheme.
/* 2008/01/11 MH Modify 2T RATR table for different RSSI. 080515 porting by amy from windows code.*/
pra->upper_rssi_threshold_ratr = 0x8f0f0000;
pra->middle_rssi_threshold_ratr = 0x8f0ff000;
pra->low_rssi_threshold_ratr = 0x8f0ff001;
pra->low_rssi_threshold_ratr_40M = 0x8f0ff005;
pra->low_rssi_threshold_ratr_20M = 0x8f0ff001;
pra->ping_rssi_ratr = 0x0000000d;//cosa add for test
}
else if (priv->rf_type == RF_1T2R)
{
pra->upper_rssi_threshold_ratr = 0x000f0000;
pra->middle_rssi_threshold_ratr = 0x000ff000;
pra->low_rssi_threshold_ratr = 0x000ff001;
pra->low_rssi_threshold_ratr_40M = 0x000ff005;
pra->low_rssi_threshold_ratr_20M = 0x000ff001;
pra->ping_rssi_ratr = 0x0000000d;//cosa add for test
}
} // InitRateAdaptive
/*-----------------------------------------------------------------------------
* Function: dm_check_rate_adaptive()
*
* Overview:
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 05/26/08 amy Create version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
static void dm_check_rate_adaptive(struct net_device * dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
prate_adaptive pra = (prate_adaptive)&priv->rate_adaptive;
u32 currentRATR, targetRATR = 0;
u32 LowRSSIThreshForRA = 0, HighRSSIThreshForRA = 0;
bool bshort_gi_enabled = false;
static u8 ping_rssi_state=0;
if(!priv->up)
{
RT_TRACE(COMP_RATE, "<---- dm_check_rate_adaptive(): driver is going to unload\n");
return;
}
if(pra->rate_adaptive_disabled)//this variable is set by ioctl.
return;
// TODO: Only 11n mode is implemented currently,
if( !(priv->ieee80211->mode == WIRELESS_MODE_N_24G ||
priv->ieee80211->mode == WIRELESS_MODE_N_5G))
return;
if( priv->ieee80211->state == IEEE80211_LINKED )
{
// RT_TRACE(COMP_RATE, "dm_CheckRateAdaptive(): \t");
//
// Check whether Short GI is enabled
//
bshort_gi_enabled = (pHTInfo->bCurTxBW40MHz && pHTInfo->bCurShortGI40MHz) ||
(!pHTInfo->bCurTxBW40MHz && pHTInfo->bCurShortGI20MHz);
pra->upper_rssi_threshold_ratr =
(pra->upper_rssi_threshold_ratr & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;
pra->middle_rssi_threshold_ratr =
(pra->middle_rssi_threshold_ratr & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;
if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
{
pra->low_rssi_threshold_ratr =
(pra->low_rssi_threshold_ratr_40M & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;
}
else
{
pra->low_rssi_threshold_ratr =
(pra->low_rssi_threshold_ratr_20M & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;
}
//cosa add for test
pra->ping_rssi_ratr =
(pra->ping_rssi_ratr & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;
/* 2007/10/08 MH We support RA smooth scheme now. When it is the first
time to link with AP. We will not change upper/lower threshold. If
STA stay in high or low level, we must change two different threshold
to prevent jumping frequently. */
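/*
* Concretely: init_rate_adaptive() sets each low-to-high threshold
* 5 units above its high-to-low counterpart, so the smoothed PWDB
* must move at least 5 units past the boundary before the state can
* flip back, keeping a signal that hovers near a threshold from
* toggling the RATR on every watchdog tick.
*/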
if (pra->ratr_state == DM_RATR_STA_HIGH)
{
HighRSSIThreshForRA = pra->high2low_rssi_thresh_for_ra;
LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)?
(pra->low_rssi_thresh_for_ra40M):(pra->low_rssi_thresh_for_ra20M);
}
else if (pra->ratr_state == DM_RATR_STA_LOW)
{
HighRSSIThreshForRA = pra->high_rssi_thresh_for_ra;
LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)?
(pra->low2high_rssi_thresh_for_ra40M):(pra->low2high_rssi_thresh_for_ra20M);
}
else
{
HighRSSIThreshForRA = pra->high_rssi_thresh_for_ra;
LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)?
(pra->low_rssi_thresh_for_ra40M):(pra->low_rssi_thresh_for_ra20M);
}
//DbgPrint("[DM] THresh H/L=%d/%d\n\r", RATR.HighRSSIThreshForRA, RATR.LowRSSIThreshForRA);
if(priv->undecorated_smoothed_pwdb >= (long)HighRSSIThreshForRA)
{
//DbgPrint("[DM] RSSI=%d STA=HIGH\n\r", pHalData->UndecoratedSmoothedPWDB);
pra->ratr_state = DM_RATR_STA_HIGH;
targetRATR = pra->upper_rssi_threshold_ratr;
}else if(priv->undecorated_smoothed_pwdb >= (long)LowRSSIThreshForRA)
{
//DbgPrint("[DM] RSSI=%d STA=Middle\n\r", pHalData->UndecoratedSmoothedPWDB);
pra->ratr_state = DM_RATR_STA_MIDDLE;
targetRATR = pra->middle_rssi_threshold_ratr;
}else
{
//DbgPrint("[DM] RSSI=%d STA=LOW\n\r", pHalData->UndecoratedSmoothedPWDB);
pra->ratr_state = DM_RATR_STA_LOW;
targetRATR = pra->low_rssi_threshold_ratr;
}
//cosa add for test
if(pra->ping_rssi_enable)
{
//pHalData->UndecoratedSmoothedPWDB = 19;
if(priv->undecorated_smoothed_pwdb < (long)(pra->ping_rssi_thresh_for_ra+5))
{
if( (priv->undecorated_smoothed_pwdb < (long)pra->ping_rssi_thresh_for_ra) ||
ping_rssi_state )
{
//DbgPrint("TestRSSI = %d, set RATR to 0x%x \n", pHalData->UndecoratedSmoothedPWDB, pRA->TestRSSIRATR);
pra->ratr_state = DM_RATR_STA_LOW;
targetRATR = pra->ping_rssi_ratr;
ping_rssi_state = 1;
}
//else
// DbgPrint("TestRSSI is between the range. \n");
}
else
{
//DbgPrint("TestRSSI Recover to 0x%x \n", targetRATR);
ping_rssi_state = 0;
}
}
// 2008.04.01
// For RTL819X, if pairwisekey = wep/tkip, we support only MCS0~7.
if(priv->ieee80211->GetHalfNmodeSupportByAPsHandler(dev))
targetRATR &= 0xf00fffff;
//
// Check whether updating of RATR0 is required
//
currentRATR = read_nic_dword(dev, RATR0);
if( targetRATR != currentRATR )
{
u32 ratr_value;
ratr_value = targetRATR;
RT_TRACE(COMP_RATE,"currentRATR = %x, targetRATR = %x\n", currentRATR, targetRATR);
if(priv->rf_type == RF_1T2R)
{
ratr_value &= ~(RATE_ALL_OFDM_2SS);
}
write_nic_dword(dev, RATR0, ratr_value);
write_nic_byte(dev, UFWP, 1);
pra->last_ratr = targetRATR;
}
}
else
{
pra->ratr_state = DM_RATR_STA_MAX;
}
} // dm_CheckRateAdaptive
static void dm_init_bandwidth_autoswitch(struct net_device * dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
priv->ieee80211->bandwidth_auto_switch.threshold_20Mhzto40Mhz = BW_AUTO_SWITCH_LOW_HIGH;
priv->ieee80211->bandwidth_auto_switch.threshold_40Mhzto20Mhz = BW_AUTO_SWITCH_HIGH_LOW;
priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = false;
priv->ieee80211->bandwidth_auto_switch.bautoswitch_enable = false;
} // dm_init_bandwidth_autoswitch
static void dm_bandwidth_autoswitch(struct net_device * dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
if(priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ||!priv->ieee80211->bandwidth_auto_switch.bautoswitch_enable){
return;
}else{
if(priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz == false){//currently sending packets at 40MHz in 20/40 mode
if(priv->undecorated_smoothed_pwdb <= priv->ieee80211->bandwidth_auto_switch.threshold_40Mhzto20Mhz)
priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = true;
}else{//forced to send packets at 20MHz in 20/40 mode
if(priv->undecorated_smoothed_pwdb >= priv->ieee80211->bandwidth_auto_switch.threshold_20Mhzto40Mhz)
priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = false;
}
}
} // dm_BandwidthAutoSwitch
//OFDM default at 0db, index=6.
static u32 OFDMSwingTable[OFDM_Table_Length] = {
0x7f8001fe, // 0, +6db
0x71c001c7, // 1, +5db
0x65400195, // 2, +4db
0x5a400169, // 3, +3db
0x50800142, // 4, +2db
0x47c0011f, // 5, +1db
0x40000100, // 6, +0db ===> default, upper for higher temperature, lower for low temperature
0x390000e4, // 7, -1db
0x32c000cb, // 8, -2db
0x2d4000b5, // 9, -3db
0x288000a2, // 10, -4db
0x24000090, // 11, -5db
0x20000080, // 12, -6db
0x1c800072, // 13, -7db
0x19800066, // 14, -8db
0x26c0005b, // 15, -9db
0x24400051, // 16, -10db
0x12000048, // 17, -11db
0x10000040 // 18, -12db
};
static u8 CCKSwingTable_Ch1_Ch13[CCK_Table_length][8] = {
{0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, // 0, +0db ===> CCK40M default
{0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, // 1, -1db
{0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, // 2, -2db
{0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, // 3, -3db
{0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, // 4, -4db
{0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, // 5, -5db
{0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, // 6, -6db ===> CCK20M default
{0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, // 7, -7db
{0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, // 8, -8db
{0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, // 9, -9db
{0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, // 10, -10db
{0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01} // 11, -11db
};
static u8 CCKSwingTable_Ch14[CCK_Table_length][8] = {
{0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, // 0, +0db ===> CCK40M default
{0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, // 1, -1db
{0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, // 2, -2db
{0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, // 3, -3db
{0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, // 4, -4db
{0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, // 5, -5db
{0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, // 6, -6db ===> CCK20M default
{0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, // 7, -7db
{0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, // 8, -8db
{0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, // 9, -9db
{0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, // 10, -10db
{0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00} // 11, -11db
};
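/*
* Both CCK tables are indexed by attenuation step (0 = +0db down to
* -11db); each row holds the eight CCK TX filter coefficients that
* are programmed starting at register 0xa22 when
* dm_cck_txpower_adjust() updates the swing (see the matching note in
* dm_InitializeTXPowerTracking_TSSI() below). The channel-14 variant
* zeroes the last four coefficients.
*/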
static void dm_TXPowerTrackingCallback_TSSI(struct net_device * dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
bool bHighpowerstate, viviflag = FALSE;
DCMD_TXCMD_T tx_cmd;
u8 powerlevelOFDM24G;
int i =0, j = 0, k = 0;
u8 RF_Type, tmp_report[5]={0, 0, 0, 0, 0};
u32 Value;
u8 Pwr_Flag;
u16 Avg_TSSI_Meas, TSSI_13dBm, Avg_TSSI_Meas_from_driver=0;
//RT_STATUS rtStatus = RT_STATUS_SUCCESS;
bool rtStatus = true;
u32 delta=0;
write_nic_byte(dev, 0x1ba, 0);
priv->ieee80211->bdynamic_txpower_enable = false;
bHighpowerstate = priv->bDynamicTxHighPower;
powerlevelOFDM24G = (u8)(priv->Pwr_Track>>24);
RF_Type = priv->rf_type;
Value = (RF_Type<<8) | powerlevelOFDM24G;
RT_TRACE(COMP_POWER_TRACKING, "powerlevelOFDM24G = %x\n", powerlevelOFDM24G);
for(j = 0; j<=30; j++)
{ //fill tx_cmd
tx_cmd.Op = TXCMD_SET_TX_PWR_TRACKING;
tx_cmd.Length = 4;
tx_cmd.Value = Value;
#ifdef RTL8192U
rtStatus = SendTxCommandPacket(dev, &tx_cmd, 12);
if (rtStatus == RT_STATUS_FAILURE)
{
RT_TRACE(COMP_POWER_TRACKING, "Set configuration with tx cmd queue fail!\n");
}
#else
cmpk_message_handle_tx(dev, (u8*)&tx_cmd,
DESC_PACKET_TYPE_INIT, sizeof(DCMD_TXCMD_T));
#endif
mdelay(1);
//DbgPrint("hi, vivi, strange\n");
for(i = 0;i <= 30; i++)
{
Pwr_Flag = read_nic_byte(dev, 0x1ba);
if (Pwr_Flag == 0)
{
mdelay(1);
continue;
}
#ifdef RTL8190P
Avg_TSSI_Meas = read_nic_word(dev, 0x1bc);
#else
Avg_TSSI_Meas = read_nic_word(dev, 0x13c);
#endif
if(Avg_TSSI_Meas == 0)
{
write_nic_byte(dev, 0x1ba, 0);
break;
}
for(k = 0;k < 5; k++)
{
#ifdef RTL8190P
tmp_report[k] = read_nic_byte(dev, 0x1d8+k);
#else
if(k !=4)
tmp_report[k] = read_nic_byte(dev, 0x134+k);
else
tmp_report[k] = read_nic_byte(dev, 0x13e);
#endif
RT_TRACE(COMP_POWER_TRACKING, "TSSI_report_value = %d\n", tmp_report[k]);
}
//check if the report value is right
for(k = 0;k < 5; k++)
{
if(tmp_report[k] <= 20)
{
viviflag =TRUE;
break;
}
}
if(viviflag ==TRUE)
{
write_nic_byte(dev, 0x1ba, 0);
viviflag = FALSE;
RT_TRACE(COMP_POWER_TRACKING, "we filted this data\n");
for(k = 0;k < 5; k++)
tmp_report[k] = 0;
break;
}
for(k = 0;k < 5; k++)
{
Avg_TSSI_Meas_from_driver += tmp_report[k];
}
Avg_TSSI_Meas_from_driver = Avg_TSSI_Meas_from_driver*100/5;
RT_TRACE(COMP_POWER_TRACKING, "Avg_TSSI_Meas_from_driver = %d\n", Avg_TSSI_Meas_from_driver);
TSSI_13dBm = priv->TSSI_13dBm;
RT_TRACE(COMP_POWER_TRACKING, "TSSI_13dBm = %d\n", TSSI_13dBm);
//if(abs(Avg_TSSI_Meas_from_driver - TSSI_13dBm) <= E_FOR_TX_POWER_TRACK)
// For MacOS compatibility, compute the absolute difference by hand
if(Avg_TSSI_Meas_from_driver > TSSI_13dBm)
delta = Avg_TSSI_Meas_from_driver - TSSI_13dBm;
else
delta = TSSI_13dBm - Avg_TSSI_Meas_from_driver;
if(delta <= E_FOR_TX_POWER_TRACK)
{
priv->ieee80211->bdynamic_txpower_enable = TRUE;
write_nic_byte(dev, 0x1ba, 0);
RT_TRACE(COMP_POWER_TRACKING, "tx power track is done\n");
RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex = %d\n", priv->rfa_txpowertrackingindex);
RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_real = %d\n", priv->rfa_txpowertrackingindex_real);
#ifdef RTL8190P
RT_TRACE(COMP_POWER_TRACKING, "priv->rfc_txpowertrackingindex = %d\n", priv->rfc_txpowertrackingindex);
RT_TRACE(COMP_POWER_TRACKING, "priv->rfc_txpowertrackingindex_real = %d\n", priv->rfc_txpowertrackingindex_real);
#endif
RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation_difference = %d\n", priv->cck_present_attentuation_difference);
RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation = %d\n", priv->cck_present_attentuation);
return;
}
else
{
if(Avg_TSSI_Meas_from_driver < TSSI_13dBm - E_FOR_TX_POWER_TRACK)
{
if((priv->rfa_txpowertrackingindex > 0)
#ifdef RTL8190P
&&(priv->rfc_txpowertrackingindex > 0)
#endif
)
{
priv->rfa_txpowertrackingindex--;
if(priv->rfa_txpowertrackingindex_real > 4)
{
priv->rfa_txpowertrackingindex_real--;
rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfa_txpowertrackingindex_real].txbbgain_value);
}
#ifdef RTL8190P
priv->rfc_txpowertrackingindex--;
if(priv->rfc_txpowertrackingindex_real > 4)
{
priv->rfc_txpowertrackingindex_real--;
rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfc_txpowertrackingindex_real].txbbgain_value);
}
#endif
}
}
else
{
if((priv->rfa_txpowertrackingindex < 36)
#ifdef RTL8190P
&&(priv->rfc_txpowertrackingindex < 36)
#endif
)
{
priv->rfa_txpowertrackingindex++;
priv->rfa_txpowertrackingindex_real++;
rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfa_txpowertrackingindex_real].txbbgain_value);
#ifdef RTL8190P
priv->rfc_txpowertrackingindex++;
priv->rfc_txpowertrackingindex_real++;
rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfc_txpowertrackingindex_real].txbbgain_value);
#endif
}
}
priv->cck_present_attentuation_difference
= priv->rfa_txpowertrackingindex - priv->rfa_txpowertracking_default;
if(priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
priv->cck_present_attentuation
= priv->cck_present_attentuation_20Mdefault + priv->cck_present_attentuation_difference;
else
priv->cck_present_attentuation
= priv->cck_present_attentuation_40Mdefault + priv->cck_present_attentuation_difference;
if(priv->cck_present_attentuation > -1&&priv->cck_present_attentuation <23)
{
if(priv->ieee80211->current_network.channel == 14 && !priv->bcck_in_ch14)
{
priv->bcck_in_ch14 = TRUE;
dm_cck_txpower_adjust(dev,priv->bcck_in_ch14);
}
else if(priv->ieee80211->current_network.channel != 14 && priv->bcck_in_ch14)
{
priv->bcck_in_ch14 = FALSE;
dm_cck_txpower_adjust(dev,priv->bcck_in_ch14);
}
else
dm_cck_txpower_adjust(dev,priv->bcck_in_ch14);
}
RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex = %d\n", priv->rfa_txpowertrackingindex);
RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_real = %d\n", priv->rfa_txpowertrackingindex_real);
#ifdef RTL8190P
RT_TRACE(COMP_POWER_TRACKING, "priv->rfc_txpowertrackingindex = %d\n", priv->rfc_txpowertrackingindex);
RT_TRACE(COMP_POWER_TRACKING, "priv->rfc_txpowertrackingindex_real = %d\n", priv->rfc_txpowertrackingindex_real);
#endif
RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation_difference = %d\n", priv->cck_present_attentuation_difference);
RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation = %d\n", priv->cck_present_attentuation);
if (priv->cck_present_attentuation_difference <= -12||priv->cck_present_attentuation_difference >= 24)
{
priv->ieee80211->bdynamic_txpower_enable = TRUE;
write_nic_byte(dev, 0x1ba, 0);
RT_TRACE(COMP_POWER_TRACKING, "tx power track--->limited\n");
return;
}
}
write_nic_byte(dev, 0x1ba, 0);
Avg_TSSI_Meas_from_driver = 0;
for(k = 0;k < 5; k++)
tmp_report[k] = 0;
break;
}
}
priv->ieee80211->bdynamic_txpower_enable = TRUE;
write_nic_byte(dev, 0x1ba, 0);
}
static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device * dev)
{
#define ThermalMeterVal 9
struct r8192_priv *priv = ieee80211_priv(dev);
u32 tmpRegA, TempCCk;
u8 tmpOFDMindex, tmpCCKindex, tmpCCK20Mindex, tmpCCK40Mindex, tmpval;
int i =0, CCKSwingNeedUpdate=0;
if(!priv->btxpower_trackingInit)
{
//Query OFDM default setting
tmpRegA= rtl8192_QueryBBReg(dev, rOFDM0_XATxIQImbalance, bMaskDWord);
for(i=0; i<OFDM_Table_Length; i++) //find the index
{
if(tmpRegA == OFDMSwingTable[i])
{
priv->OFDM_index= (u8)i;
RT_TRACE(COMP_POWER_TRACKING, "Initial reg0x%x = 0x%x, OFDM_index=0x%x\n",
rOFDM0_XATxIQImbalance, tmpRegA, priv->OFDM_index);
}
}
//Query CCK default setting From 0xa22
TempCCk = rtl8192_QueryBBReg(dev, rCCK0_TxFilter1, bMaskByte2);
for(i=0 ; i<CCK_Table_length ; i++)
{
if(TempCCk == (u32)CCKSwingTable_Ch1_Ch13[i][0])
{
priv->CCK_index =(u8) i;
RT_TRACE(COMP_POWER_TRACKING, "Initial reg0x%x = 0x%x, CCK_index=0x%x\n",
rCCK0_TxFilter1, TempCCk, priv->CCK_index);
break;
}
}
priv->btxpower_trackingInit = TRUE;
//pHalData->TXPowercount = 0;
return;
}
//==========================
// this is only for test, should be masked
//==========================
// read and filter out unreasonable value
tmpRegA = rtl8192_phy_QueryRFReg(dev, RF90_PATH_A, 0x12, 0x078); // 0x12: RF Reg[10:7]
RT_TRACE(COMP_POWER_TRACKING, "Readback ThermalMeterA = %d \n", tmpRegA);
if(tmpRegA < 3 || tmpRegA > 13)
return;
if(tmpRegA >= 12) // if over 12, throughput will be bad at high temperature
tmpRegA = 12;
RT_TRACE(COMP_POWER_TRACKING, "Valid ThermalMeterA = %d \n", tmpRegA);
priv->ThermalMeter[0] = ThermalMeterVal; //We use fixed value by Bryant's suggestion
priv->ThermalMeter[1] = ThermalMeterVal; //We use fixed value by Bryant's suggestion
//Get current RF-A temperature index
if(priv->ThermalMeter[0] >= (u8)tmpRegA) //lower temperature
{
tmpOFDMindex = tmpCCK20Mindex = 6+(priv->ThermalMeter[0]-(u8)tmpRegA);
tmpCCK40Mindex = tmpCCK20Mindex - 6;
if(tmpOFDMindex >= OFDM_Table_Length)
tmpOFDMindex = OFDM_Table_Length-1;
if(tmpCCK20Mindex >= CCK_Table_length)
tmpCCK20Mindex = CCK_Table_length-1;
if(tmpCCK40Mindex >= CCK_Table_length)
tmpCCK40Mindex = CCK_Table_length-1;
}
else
{
tmpval = ((u8)tmpRegA - priv->ThermalMeter[0]);
if(tmpval >= 6) // higher temperature
tmpOFDMindex = tmpCCK20Mindex = 0; // max to +6dB
else
tmpOFDMindex = tmpCCK20Mindex = 6 - tmpval;
tmpCCK40Mindex = 0;
}
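/*
* Worked example: with the fixed ThermalMeterVal of 9, a readback of
* tmpRegA = 12 takes this branch with tmpval = 3, giving
* tmpOFDMindex = tmpCCK20Mindex = 6 - 3 = 3, i.e. the +3db entry of
* OFDMSwingTable above ("upper for higher temperature").
*/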
//DbgPrint("%ddb, tmpOFDMindex = %d, tmpCCK20Mindex = %d, tmpCCK40Mindex = %d",
//((u1Byte)tmpRegA - pHalData->ThermalMeter[0]),
//tmpOFDMindex, tmpCCK20Mindex, tmpCCK40Mindex);
if(priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) //40M
tmpCCKindex = tmpCCK40Mindex;
else
tmpCCKindex = tmpCCK20Mindex;
if(priv->ieee80211->current_network.channel == 14 && !priv->bcck_in_ch14)
{
priv->bcck_in_ch14 = TRUE;
CCKSwingNeedUpdate = 1;
}
else if(priv->ieee80211->current_network.channel != 14 && priv->bcck_in_ch14)
{
priv->bcck_in_ch14 = FALSE;
CCKSwingNeedUpdate = 1;
}
if(priv->CCK_index != tmpCCKindex)
{
priv->CCK_index = tmpCCKindex;
CCKSwingNeedUpdate = 1;
}
if(CCKSwingNeedUpdate)
{
//DbgPrint("Update CCK Swing, CCK_index = %d\n", pHalData->CCK_index);
dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
}
if(priv->OFDM_index != tmpOFDMindex)
{
priv->OFDM_index = tmpOFDMindex;
rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, OFDMSwingTable[priv->OFDM_index]);
RT_TRACE(COMP_POWER_TRACKING, "Update OFDMSwing[%d] = 0x%x\n",
priv->OFDM_index, OFDMSwingTable[priv->OFDM_index]);
}
priv->txpower_count = 0;
}
extern void dm_txpower_trackingcallback(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work,struct delayed_work,work);
struct r8192_priv *priv = container_of(dwork,struct r8192_priv,txpower_tracking_wq);
struct net_device *dev = priv->ieee80211->dev;
#ifdef RTL8190P
dm_TXPowerTrackingCallback_TSSI(dev);
#else
if(priv->bDcut == TRUE)
dm_TXPowerTrackingCallback_TSSI(dev);
else
dm_TXPowerTrackingCallback_ThermalMeter(dev);
#endif
}
static void dm_InitializeTXPowerTracking_TSSI(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
//Initial the Tx BB index and mapping value
priv->txbbgain_table[0].txbb_iq_amplifygain = 12;
priv->txbbgain_table[0].txbbgain_value=0x7f8001fe;
priv->txbbgain_table[1].txbb_iq_amplifygain = 11;
priv->txbbgain_table[1].txbbgain_value=0x788001e2;
priv->txbbgain_table[2].txbb_iq_amplifygain = 10;
priv->txbbgain_table[2].txbbgain_value=0x71c001c7;
priv->txbbgain_table[3].txbb_iq_amplifygain = 9;
priv->txbbgain_table[3].txbbgain_value=0x6b8001ae;
priv->txbbgain_table[4].txbb_iq_amplifygain = 8;
priv->txbbgain_table[4].txbbgain_value=0x65400195;
priv->txbbgain_table[5].txbb_iq_amplifygain = 7;
priv->txbbgain_table[5].txbbgain_value=0x5fc0017f;
priv->txbbgain_table[6].txbb_iq_amplifygain = 6;
priv->txbbgain_table[6].txbbgain_value=0x5a400169;
priv->txbbgain_table[7].txbb_iq_amplifygain = 5;
priv->txbbgain_table[7].txbbgain_value=0x55400155;
priv->txbbgain_table[8].txbb_iq_amplifygain = 4;
priv->txbbgain_table[8].txbbgain_value=0x50800142;
priv->txbbgain_table[9].txbb_iq_amplifygain = 3;
priv->txbbgain_table[9].txbbgain_value=0x4c000130;
priv->txbbgain_table[10].txbb_iq_amplifygain = 2;
priv->txbbgain_table[10].txbbgain_value=0x47c0011f;
priv->txbbgain_table[11].txbb_iq_amplifygain = 1;
priv->txbbgain_table[11].txbbgain_value=0x43c0010f;
priv->txbbgain_table[12].txbb_iq_amplifygain = 0;
priv->txbbgain_table[12].txbbgain_value=0x40000100;
priv->txbbgain_table[13].txbb_iq_amplifygain = -1;
priv->txbbgain_table[13].txbbgain_value=0x3c8000f2;
priv->txbbgain_table[14].txbb_iq_amplifygain = -2;
priv->txbbgain_table[14].txbbgain_value=0x390000e4;
priv->txbbgain_table[15].txbb_iq_amplifygain = -3;
priv->txbbgain_table[15].txbbgain_value=0x35c000d7;
priv->txbbgain_table[16].txbb_iq_amplifygain = -4;
priv->txbbgain_table[16].txbbgain_value=0x32c000cb;
priv->txbbgain_table[17].txbb_iq_amplifygain = -5;
priv->txbbgain_table[17].txbbgain_value=0x300000c0;
priv->txbbgain_table[18].txbb_iq_amplifygain = -6;
priv->txbbgain_table[18].txbbgain_value=0x2d4000b5;
priv->txbbgain_table[19].txbb_iq_amplifygain = -7;
priv->txbbgain_table[19].txbbgain_value=0x2ac000ab;
priv->txbbgain_table[20].txbb_iq_amplifygain = -8;
priv->txbbgain_table[20].txbbgain_value=0x288000a2;
priv->txbbgain_table[21].txbb_iq_amplifygain = -9;
priv->txbbgain_table[21].txbbgain_value=0x26000098;
priv->txbbgain_table[22].txbb_iq_amplifygain = -10;
priv->txbbgain_table[22].txbbgain_value=0x24000090;
priv->txbbgain_table[23].txbb_iq_amplifygain = -11;
priv->txbbgain_table[23].txbbgain_value=0x22000088;
priv->txbbgain_table[24].txbb_iq_amplifygain = -12;
priv->txbbgain_table[24].txbbgain_value=0x20000080;
priv->txbbgain_table[25].txbb_iq_amplifygain = -13;
priv->txbbgain_table[25].txbbgain_value=0x1a00006c;
priv->txbbgain_table[26].txbb_iq_amplifygain = -14;
priv->txbbgain_table[26].txbbgain_value=0x1c800072;
priv->txbbgain_table[27].txbb_iq_amplifygain = -15;
priv->txbbgain_table[27].txbbgain_value=0x18000060;
priv->txbbgain_table[28].txbb_iq_amplifygain = -16;
priv->txbbgain_table[28].txbbgain_value=0x19800066;
priv->txbbgain_table[29].txbb_iq_amplifygain = -17;
priv->txbbgain_table[29].txbbgain_value=0x15800056;
priv->txbbgain_table[30].txbb_iq_amplifygain = -18;
priv->txbbgain_table[30].txbbgain_value=0x26c0005b;
priv->txbbgain_table[31].txbb_iq_amplifygain = -19;
priv->txbbgain_table[31].txbbgain_value=0x14400051;
priv->txbbgain_table[32].txbb_iq_amplifygain = -20;
priv->txbbgain_table[32].txbbgain_value=0x24400051;
priv->txbbgain_table[33].txbb_iq_amplifygain = -21;
priv->txbbgain_table[33].txbbgain_value=0x1300004c;
priv->txbbgain_table[34].txbb_iq_amplifygain = -22;
priv->txbbgain_table[34].txbbgain_value=0x12000048;
priv->txbbgain_table[35].txbb_iq_amplifygain = -23;
priv->txbbgain_table[35].txbbgain_value=0x11000044;
priv->txbbgain_table[36].txbb_iq_amplifygain = -24;
priv->txbbgain_table[36].txbbgain_value=0x10000040;
//ccktxbb_valuearray[0] is 0xA22 [1] is 0xA24 ...[7] is 0xA29
//This Table is for CH1~CH13
priv->cck_txbbgain_table[0].ccktxbb_valuearray[0] = 0x36;
priv->cck_txbbgain_table[0].ccktxbb_valuearray[1] = 0x35;
priv->cck_txbbgain_table[0].ccktxbb_valuearray[2] = 0x2e;
priv->cck_txbbgain_table[0].ccktxbb_valuearray[3] = 0x25;
priv->cck_txbbgain_table[0].ccktxbb_valuearray[4] = 0x1c;
priv->cck_txbbgain_table[0].ccktxbb_valuearray[5] = 0x12;
priv->cck_txbbgain_table[0].ccktxbb_valuearray[6] = 0x09;
priv->cck_txbbgain_table[0].ccktxbb_valuearray[7] = 0x04;
priv->cck_txbbgain_table[1].ccktxbb_valuearray[0] = 0x33;
priv->cck_txbbgain_table[1].ccktxbb_valuearray[1] = 0x32;
priv->cck_txbbgain_table[1].ccktxbb_valuearray[2] = 0x2b;
priv->cck_txbbgain_table[1].ccktxbb_valuearray[3] = 0x23;
priv->cck_txbbgain_table[1].ccktxbb_valuearray[4] = 0x1a;
priv->cck_txbbgain_table[1].ccktxbb_valuearray[5] = 0x11;
priv->cck_txbbgain_table[1].ccktxbb_valuearray[6] = 0x08;
priv->cck_txbbgain_table[1].ccktxbb_valuearray[7] = 0x04;
priv->cck_txbbgain_table[2].ccktxbb_valuearray[0] = 0x30;
priv->cck_txbbgain_table[2].ccktxbb_valuearray[1] = 0x2f;
priv->cck_txbbgain_table[2].ccktxbb_valuearray[2] = 0x29;
priv->cck_txbbgain_table[2].ccktxbb_valuearray[3] = 0x21;
priv->cck_txbbgain_table[2].ccktxbb_valuearray[4] = 0x19;
priv->cck_txbbgain_table[2].ccktxbb_valuearray[5] = 0x10;
priv->cck_txbbgain_table[2].ccktxbb_valuearray[6] = 0x08;
priv->cck_txbbgain_table[2].ccktxbb_valuearray[7] = 0x03;
priv->cck_txbbgain_table[3].ccktxbb_valuearray[0] = 0x2d;
priv->cck_txbbgain_table[3].ccktxbb_valuearray[1] = 0x2d;
priv->cck_txbbgain_table[3].ccktxbb_valuearray[2] = 0x27;
priv->cck_txbbgain_table[3].ccktxbb_valuearray[3] = 0x1f;
priv->cck_txbbgain_table[3].ccktxbb_valuearray[4] = 0x18;
priv->cck_txbbgain_table[3].ccktxbb_valuearray[5] = 0x0f;
priv->cck_txbbgain_table[3].ccktxbb_valuearray[6] = 0x08;
priv->cck_txbbgain_table[3].ccktxbb_valuearray[7] = 0x03;
priv->cck_txbbgain_table[4].ccktxbb_valuearray[0] = 0x2b;
priv->cck_txbbgain_table[4].ccktxbb_valuearray[1] = 0x2a;
priv->cck_txbbgain_table[4].ccktxbb_valuearray[2] = 0x25;
priv->cck_txbbgain_table[4].ccktxbb_valuearray[3] = 0x1e;
priv->cck_txbbgain_table[4].ccktxbb_valuearray[4] = 0x16;
priv->cck_txbbgain_table[4].ccktxbb_valuearray[5] = 0x0e;
priv->cck_txbbgain_table[4].ccktxbb_valuearray[6] = 0x07;
priv->cck_txbbgain_table[4].ccktxbb_valuearray[7] = 0x03;
priv->cck_txbbgain_table[5].ccktxbb_valuearray[0] = 0x28;
priv->cck_txbbgain_table[5].ccktxbb_valuearray[1] = 0x28;
priv->cck_txbbgain_table[5].ccktxbb_valuearray[2] = 0x22;
priv->cck_txbbgain_table[5].ccktxbb_valuearray[3] = 0x1c;
priv->cck_txbbgain_table[5].ccktxbb_valuearray[4] = 0x15;
priv->cck_txbbgain_table[5].ccktxbb_valuearray[5] = 0x0d;
priv->cck_txbbgain_table[5].ccktxbb_valuearray[6] = 0x07;
priv->cck_txbbgain_table[5].ccktxbb_valuearray[7] = 0x03;
priv->cck_txbbgain_table[6].ccktxbb_valuearray[0] = 0x26;
priv->cck_txbbgain_table[6].ccktxbb_valuearray[1] = 0x25;
priv->cck_txbbgain_table[6].ccktxbb_valuearray[2] = 0x21;
priv->cck_txbbgain_table[6].ccktxbb_valuearray[3] = 0x1b;
priv->cck_txbbgain_table[6].ccktxbb_valuearray[4] = 0x14;
priv->cck_txbbgain_table[6].ccktxbb_valuearray[5] = 0x0d;
priv->cck_txbbgain_table[6].ccktxbb_valuearray[6] = 0x06;
priv->cck_txbbgain_table[6].ccktxbb_valuearray[7] = 0x03;
priv->cck_txbbgain_table[7].ccktxbb_valuearray[0] = 0x24;
priv->cck_txbbgain_table[7].ccktxbb_valuearray[1] = 0x23;
priv->cck_txbbgain_table[7].ccktxbb_valuearray[2] = 0x1f;
priv->cck_txbbgain_table[7].ccktxbb_valuearray[3] = 0x19;
priv->cck_txbbgain_table[7].ccktxbb_valuearray[4] = 0x13;
priv->cck_txbbgain_table[7].ccktxbb_valuearray[5] = 0x0c;
priv->cck_txbbgain_table[7].ccktxbb_valuearray[6] = 0x06;
priv->cck_txbbgain_table[7].ccktxbb_valuearray[7] = 0x03;
priv->cck_txbbgain_table[8].ccktxbb_valuearray[0] = 0x22;
priv->cck_txbbgain_table[8].ccktxbb_valuearray[1] = 0x21;
priv->cck_txbbgain_table[8].ccktxbb_valuearray[2] = 0x1d;
priv->cck_txbbgain_table[8].ccktxbb_valuearray[3] = 0x18;
priv->cck_txbbgain_table[8].ccktxbb_valuearray[4] = 0x11;
priv->cck_txbbgain_table[8].ccktxbb_valuearray[5] = 0x0b;
priv->cck_txbbgain_table[8].ccktxbb_valuearray[6] = 0x06;
priv->cck_txbbgain_table[8].ccktxbb_valuearray[7] = 0x02;
priv->cck_txbbgain_table[9].ccktxbb_valuearray[0] = 0x20;
priv->cck_txbbgain_table[9].ccktxbb_valuearray[1] = 0x20;
priv->cck_txbbgain_table[9].ccktxbb_valuearray[2] = 0x1b;
priv->cck_txbbgain_table[9].ccktxbb_valuearray[3] = 0x16;
priv->cck_txbbgain_table[9].ccktxbb_valuearray[4] = 0x11;
priv->cck_txbbgain_table[9].ccktxbb_valuearray[5] = 0x08;
priv->cck_txbbgain_table[9].ccktxbb_valuearray[6] = 0x05;
priv->cck_txbbgain_table[9].ccktxbb_valuearray[7] = 0x02;
priv->cck_txbbgain_table[10].ccktxbb_valuearray[0] = 0x1f;
priv->cck_txbbgain_table[10].ccktxbb_valuearray[1] = 0x1e;
priv->cck_txbbgain_table[10].ccktxbb_valuearray[2] = 0x1a;
priv->cck_txbbgain_table[10].ccktxbb_valuearray[3] = 0x15;
priv->cck_txbbgain_table[10].ccktxbb_valuearray[4] = 0x10;
priv->cck_txbbgain_table[10].ccktxbb_valuearray[5] = 0x0a;
priv->cck_txbbgain_table[10].ccktxbb_valuearray[6] = 0x05;
priv->cck_txbbgain_table[10].ccktxbb_valuearray[7] = 0x02;
priv->cck_txbbgain_table[11].ccktxbb_valuearray[0] = 0x1d;
priv->cck_txbbgain_table[11].ccktxbb_valuearray[1] = 0x1c;
priv->cck_txbbgain_table[11].ccktxbb_valuearray[2] = 0x18;
priv->cck_txbbgain_table[11].ccktxbb_valuearray[3] = 0x14;
priv->cck_txbbgain_table[11].ccktxbb_valuearray[4] = 0x0f;
priv->cck_txbbgain_table[11].ccktxbb_valuearray[5] = 0x0a;
priv->cck_txbbgain_table[11].ccktxbb_valuearray[6] = 0x05;
priv->cck_txbbgain_table[11].ccktxbb_valuearray[7] = 0x02;
priv->cck_txbbgain_table[12].ccktxbb_valuearray[0] = 0x1b;
priv->cck_txbbgain_table[12].ccktxbb_valuearray[1] = 0x1a;
priv->cck_txbbgain_table[12].ccktxbb_valuearray[2] = 0x17;
priv->cck_txbbgain_table[12].ccktxbb_valuearray[3] = 0x13;
priv->cck_txbbgain_table[12].ccktxbb_valuearray[4] = 0x0e;
priv->cck_txbbgain_table[12].ccktxbb_valuearray[5] = 0x09;
priv->cck_txbbgain_table[12].ccktxbb_valuearray[6] = 0x04;
priv->cck_txbbgain_table[12].ccktxbb_valuearray[7] = 0x02;
priv->cck_txbbgain_table[13].ccktxbb_valuearray[0] = 0x1a;
priv->cck_txbbgain_table[13].ccktxbb_valuearray[1] = 0x19;
priv->cck_txbbgain_table[13].ccktxbb_valuearray[2] = 0x16;
priv->cck_txbbgain_table[13].ccktxbb_valuearray[3] = 0x12;
priv->cck_txbbgain_table[13].ccktxbb_valuearray[4] = 0x0d;
priv->cck_txbbgain_table[13].ccktxbb_valuearray[5] = 0x09;
priv->cck_txbbgain_table[13].ccktxbb_valuearray[6] = 0x04;
priv->cck_txbbgain_table[13].ccktxbb_valuearray[7] = 0x02;
priv->cck_txbbgain_table[14].ccktxbb_valuearray[0] = 0x18;
priv->cck_txbbgain_table[14].ccktxbb_valuearray[1] = 0x17;
priv->cck_txbbgain_table[14].ccktxbb_valuearray[2] = 0x15;
priv->cck_txbbgain_table[14].ccktxbb_valuearray[3] = 0x11;
priv->cck_txbbgain_table[14].ccktxbb_valuearray[4] = 0x0c;
priv->cck_txbbgain_table[14].ccktxbb_valuearray[5] = 0x08;
priv->cck_txbbgain_table[14].ccktxbb_valuearray[6] = 0x04;
priv->cck_txbbgain_table[14].ccktxbb_valuearray[7] = 0x02;
priv->cck_txbbgain_table[15].ccktxbb_valuearray[0] = 0x17;
priv->cck_txbbgain_table[15].ccktxbb_valuearray[1] = 0x16;
priv->cck_txbbgain_table[15].ccktxbb_valuearray[2] = 0x13;
priv->cck_txbbgain_table[15].ccktxbb_valuearray[3] = 0x10;
priv->cck_txbbgain_table[15].ccktxbb_valuearray[4] = 0x0c;
priv->cck_txbbgain_table[15].ccktxbb_valuearray[5] = 0x08;
priv->cck_txbbgain_table[15].ccktxbb_valuearray[6] = 0x04;
priv->cck_txbbgain_table[15].ccktxbb_valuearray[7] = 0x02;
priv->cck_txbbgain_table[16].ccktxbb_valuearray[0] = 0x16;
priv->cck_txbbgain_table[16].ccktxbb_valuearray[1] = 0x15;
priv->cck_txbbgain_table[16].ccktxbb_valuearray[2] = 0x12;
priv->cck_txbbgain_table[16].ccktxbb_valuearray[3] = 0x0f;
priv->cck_txbbgain_table[16].ccktxbb_valuearray[4] = 0x0b;
priv->cck_txbbgain_table[16].ccktxbb_valuearray[5] = 0x07;
priv->cck_txbbgain_table[16].ccktxbb_valuearray[6] = 0x04;
priv->cck_txbbgain_table[16].ccktxbb_valuearray[7] = 0x01;
priv->cck_txbbgain_table[17].ccktxbb_valuearray[0] = 0x14;
priv->cck_txbbgain_table[17].ccktxbb_valuearray[1] = 0x14;
priv->cck_txbbgain_table[17].ccktxbb_valuearray[2] = 0x11;
priv->cck_txbbgain_table[17].ccktxbb_valuearray[3] = 0x0e;
priv->cck_txbbgain_table[17].ccktxbb_valuearray[4] = 0x0b;
priv->cck_txbbgain_table[17].ccktxbb_valuearray[5] = 0x07;
priv->cck_txbbgain_table[17].ccktxbb_valuearray[6] = 0x03;
priv->cck_txbbgain_table[17].ccktxbb_valuearray[7] = 0x02;
priv->cck_txbbgain_table[18].ccktxbb_valuearray[0] = 0x13;
priv->cck_txbbgain_table[18].ccktxbb_valuearray[1] = 0x13;
priv->cck_txbbgain_table[18].ccktxbb_valuearray[2] = 0x10;
priv->cck_txbbgain_table[18].ccktxbb_valuearray[3] = 0x0d;
priv->cck_txbbgain_table[18].ccktxbb_valuearray[4] = 0x0a;
priv->cck_txbbgain_table[18].ccktxbb_valuearray[5] = 0x06;
priv->cck_txbbgain_table[18].ccktxbb_valuearray[6] = 0x03;
priv->cck_txbbgain_table[18].ccktxbb_valuearray[7] = 0x01;
priv->cck_txbbgain_table[19].ccktxbb_valuearray[0] = 0x12;
priv->cck_txbbgain_table[19].ccktxbb_valuearray[1] = 0x12;
priv->cck_txbbgain_table[19].ccktxbb_valuearray[2] = 0x0f;
priv->cck_txbbgain_table[19].ccktxbb_valuearray[3] = 0x0c;
priv->cck_txbbgain_table[19].ccktxbb_valuearray[4] = 0x09;
priv->cck_txbbgain_table[19].ccktxbb_valuearray[5] = 0x06;
priv->cck_txbbgain_table[19].ccktxbb_valuearray[6] = 0x03;
priv->cck_txbbgain_table[19].ccktxbb_valuearray[7] = 0x01;
priv->cck_txbbgain_table[20].ccktxbb_valuearray[0] = 0x11;
priv->cck_txbbgain_table[20].ccktxbb_valuearray[1] = 0x11;
priv->cck_txbbgain_table[20].ccktxbb_valuearray[2] = 0x0f;
priv->cck_txbbgain_table[20].ccktxbb_valuearray[3] = 0x0c;
priv->cck_txbbgain_table[20].ccktxbb_valuearray[4] = 0x09;
priv->cck_txbbgain_table[20].ccktxbb_valuearray[5] = 0x06;
priv->cck_txbbgain_table[20].ccktxbb_valuearray[6] = 0x03;
priv->cck_txbbgain_table[20].ccktxbb_valuearray[7] = 0x01;
priv->cck_txbbgain_table[21].ccktxbb_valuearray[0] = 0x10;
priv->cck_txbbgain_table[21].ccktxbb_valuearray[1] = 0x10;
priv->cck_txbbgain_table[21].ccktxbb_valuearray[2] = 0x0e;
priv->cck_txbbgain_table[21].ccktxbb_valuearray[3] = 0x0b;
priv->cck_txbbgain_table[21].ccktxbb_valuearray[4] = 0x08;
priv->cck_txbbgain_table[21].ccktxbb_valuearray[5] = 0x05;
priv->cck_txbbgain_table[21].ccktxbb_valuearray[6] = 0x03;
priv->cck_txbbgain_table[21].ccktxbb_valuearray[7] = 0x01;
priv->cck_txbbgain_table[22].ccktxbb_valuearray[0] = 0x0f;
priv->cck_txbbgain_table[22].ccktxbb_valuearray[1] = 0x0f;
priv->cck_txbbgain_table[22].ccktxbb_valuearray[2] = 0x0d;
priv->cck_txbbgain_table[22].ccktxbb_valuearray[3] = 0x0b;
priv->cck_txbbgain_table[22].ccktxbb_valuearray[4] = 0x08;
priv->cck_txbbgain_table[22].ccktxbb_valuearray[5] = 0x05;
priv->cck_txbbgain_table[22].ccktxbb_valuearray[6] = 0x03;
priv->cck_txbbgain_table[22].ccktxbb_valuearray[7] = 0x01;
//ccktxbb_valuearray[0..7] map to baseband registers 0xa22 through 0xa29
//This table is for CH14
priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[0] = 0x36;
priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[1] = 0x35;
priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[2] = 0x2e;
priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[3] = 0x1b;
priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[0] = 0x33;
priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[1] = 0x32;
priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[2] = 0x2b;
priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[3] = 0x19;
priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[0] = 0x30;
priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[1] = 0x2f;
priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[2] = 0x29;
priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[3] = 0x18;
priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[0] = 0x2d;
priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[1] = 0x2d;
priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[2] = 0x27;
priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[3] = 0x17;
priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[0] = 0x2b;
priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[1] = 0x2a;
priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[2] = 0x25;
priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[3] = 0x15;
priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[0] = 0x28;
priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[1] = 0x28;
priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[2] = 0x22;
priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[3] = 0x14;
priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[0] = 0x26;
priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[1] = 0x25;
priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[2] = 0x21;
priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[3] = 0x13;
priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[0] = 0x24;
priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[1] = 0x23;
priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[2] = 0x1f;
priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[3] = 0x12;
priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[0] = 0x22;
priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[1] = 0x21;
priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[2] = 0x1d;
priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[3] = 0x11;
priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[0] = 0x20;
priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[1] = 0x20;
priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[2] = 0x1b;
priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[3] = 0x10;
priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[0] = 0x1f;
priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[1] = 0x1e;
priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[2] = 0x1a;
priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[3] = 0x0f;
priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[0] = 0x1d;
priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[1] = 0x1c;
priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[2] = 0x18;
priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[3] = 0x0e;
priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[0] = 0x1b;
priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[1] = 0x1a;
priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[2] = 0x17;
priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[3] = 0x0e;
priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[0] = 0x1a;
priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[1] = 0x19;
priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[2] = 0x16;
priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[3] = 0x0d;
priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[0] = 0x18;
priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[1] = 0x17;
priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[2] = 0x15;
priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[3] = 0x0c;
priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[0] = 0x17;
priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[1] = 0x16;
priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[2] = 0x13;
priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[3] = 0x0b;
priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[0] = 0x16;
priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[1] = 0x15;
priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[2] = 0x12;
priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[3] = 0x0b;
priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[0] = 0x14;
priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[1] = 0x14;
priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[2] = 0x11;
priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[3] = 0x0a;
priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[0] = 0x13;
priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[1] = 0x13;
priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[2] = 0x10;
priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[3] = 0x0a;
priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[0] = 0x12;
priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[1] = 0x12;
priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[2] = 0x0f;
priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[3] = 0x09;
priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[0] = 0x11;
priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[1] = 0x11;
priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[2] = 0x0f;
priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[3] = 0x09;
priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[0] = 0x10;
priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[1] = 0x10;
priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[2] = 0x0e;
priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[3] = 0x08;
priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[7] = 0x00;
priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[0] = 0x0f;
priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[1] = 0x0f;
priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[2] = 0x0d;
priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[3] = 0x08;
priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[4] = 0x00;
priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[5] = 0x00;
priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[6] = 0x00;
priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[7] = 0x00;
priv->btxpower_tracking = TRUE;
priv->txpower_count = 0;
priv->btxpower_trackingInit = FALSE;
}
static void dm_InitializeTXPowerTracking_ThermalMeter(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
// Tx power tracking by thermal meter requires firmware R/W of 3-wire. This
// mechanism can be enabled only when firmware R/W 3-wire is enabled;
// otherwise, frequent 3-wire r/w by the driver puts the RF into a wrong state.
if(priv->ieee80211->FwRWRF)
priv->btxpower_tracking = TRUE;
else
priv->btxpower_tracking = FALSE;
priv->txpower_count = 0;
priv->btxpower_trackingInit = FALSE;
}
void dm_initialize_txpower_tracking(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
#ifdef RTL8190P
dm_InitializeTXPowerTracking_TSSI(dev);
#else
if(priv->bDcut == TRUE)
dm_InitializeTXPowerTracking_TSSI(dev);
else
dm_InitializeTXPowerTracking_ThermalMeter(dev);
#endif
}// dm_InitializeTXPowerTracking
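/*
 * The dispatch above selects the tracking method once at init time: RTL8190P
 * builds always use TSSI, while other builds use TSSI only on D-cut silicon
 * (priv->bDcut) and otherwise fall back to the thermal meter method.
 * dm_check_txpower_tracking() below mirrors the same dispatch at runtime.
 */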
static void dm_CheckTXPowerTracking_TSSI(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
static u32 tx_power_track_counter = 0;
if(!priv->btxpower_tracking)
return;
else
{
if((tx_power_track_counter % 30 == 0)&&(tx_power_track_counter != 0))
{
queue_delayed_work(priv->priv_wq,&priv->txpower_tracking_wq,0);
}
tx_power_track_counter++;
}
}
static void dm_CheckTXPowerTracking_ThermalMeter(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
static u8 TM_Trigger=0;
//DbgPrint("dm_CheckTXPowerTracking() \n");
if(!priv->btxpower_tracking)
return;
else
{
if(priv->txpower_count <= 2)
{
priv->txpower_count++;
return;
}
}
if(!TM_Trigger)
{
//Attention!! You have to write all 12 bits of data to the RF, or it may cause the RF to crash.
//Actually write reg0x02 bit1=0, then bit1=1.
//DbgPrint("Trigger ThermalMeter, write RF reg0x2 = 0x4d to 0x4f\n");
rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f);
rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f);
TM_Trigger = 1;
return;
}
else
{
//DbgPrint("Schedule TxPowerTrackingWorkItem\n");
queue_delayed_work(priv->priv_wq,&priv->txpower_tracking_wq,0);
TM_Trigger = 0;
}
}
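/*
 * TM_Trigger alternates this function between two phases on successive
 * calls: the first call kicks the thermal meter by toggling RF reg 0x02
 * bit1 (0x4d -> 0x4f, twice) and returns; the next call schedules
 * txpower_tracking_wq to read the measurement and adjust Tx power, then
 * clears TM_Trigger so the meter is retriggered on the call after that.
 */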
static void dm_check_txpower_tracking(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
//static u32 tx_power_track_counter = 0;
#ifdef RTL8190P
dm_CheckTXPowerTracking_TSSI(dev);
#else
if(priv->bDcut == TRUE)
dm_CheckTXPowerTracking_TSSI(dev);
else
dm_CheckTXPowerTracking_ThermalMeter(dev);
#endif
} // dm_CheckTXPowerTracking
static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14)
{
u32 TempVal;
struct r8192_priv *priv = ieee80211_priv(dev);
//Write 0xa22 0xa23
TempVal = 0;
if(!bInCH14){
//Write 0xa22 0xa23
TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[0] +
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[1]<<8) ;
rtl8192_setBBreg(dev, rCCK0_TxFilter1,bMaskHWord, TempVal);
//Write 0xa24 ~ 0xa27
TempVal = 0;
TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[2] +
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[3]<<8) +
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[4]<<16 )+
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[5]<<24);
rtl8192_setBBreg(dev, rCCK0_TxFilter2,bMaskDWord, TempVal);
//Write 0xa28 0xa29
TempVal = 0;
TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[6] +
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[7]<<8) ;
rtl8192_setBBreg(dev, rCCK0_DebugPort,bMaskLWord, TempVal);
}
else
{
TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[0] +
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[1]<<8) ;
rtl8192_setBBreg(dev, rCCK0_TxFilter1,bMaskHWord, TempVal);
//Write 0xa24 ~ 0xa27
TempVal = 0;
TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[2] +
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[3]<<8) +
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[4]<<16 )+
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[5]<<24);
rtl8192_setBBreg(dev, rCCK0_TxFilter2,bMaskDWord, TempVal);
//Write 0xa28 0xa29
TempVal = 0;
TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[6] +
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[7]<<8) ;
rtl8192_setBBreg(dev, rCCK0_DebugPort,bMaskLWord, TempVal);
}
}
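/*
 * Both branches above pack the eight per-attenuation gain bytes little-endian
 * into three register writes: bytes [0..1] go to the high word of
 * rCCK0_TxFilter1 (0xa22/0xa23), bytes [2..5] to the rCCK0_TxFilter2 dword
 * (0xa24-0xa27), and bytes [6..7] to the low word of rCCK0_DebugPort
 * (0xa28/0xa29). A minimal sketch of a helper that would factor out the
 * packing (hypothetical, not part of the driver; assumes a plain u8 gain[8]
 * view of ccktxbb_valuearray):
 *
 * static void cck_write_txbbgain(struct net_device *dev, const u8 gain[8])
 * {
 *	rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord,
 *			 gain[0] | (gain[1] << 8));
 *	rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord,
 *			 gain[2] | (gain[3] << 8) |
 *			 ((u32)gain[4] << 16) | ((u32)gain[5] << 24));
 *	rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord,
 *			 gain[6] | (gain[7] << 8));
 * }
 */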
static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH14)
{
u32 TempVal;
struct r8192_priv *priv = ieee80211_priv(dev);
TempVal = 0;
if(!bInCH14)
{
//Write 0xa22 0xa23
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][0] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][1]<<8) ;
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter1, TempVal);
//Write 0xa24 ~ 0xa27
TempVal = 0;
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][2] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][3]<<8) +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][4]<<16 )+
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][5]<<24);
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter2, TempVal);
//Write 0xa28 0xa29
TempVal = 0;
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][6] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][7]<<8) ;
rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
rCCK0_DebugPort, TempVal);
}
else
{
// priv->CCKTxPowerAdjustCntNotCh14++; //cosa add for debug.
//Write 0xa22 0xa23
TempVal = CCKSwingTable_Ch14[priv->CCK_index][0] +
(CCKSwingTable_Ch14[priv->CCK_index][1]<<8) ;
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter1, TempVal);
//Write 0xa24 ~ 0xa27
TempVal = 0;
TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
(CCKSwingTable_Ch14[priv->CCK_index][3]<<8) +
(CCKSwingTable_Ch14[priv->CCK_index][4]<<16 )+
(CCKSwingTable_Ch14[priv->CCK_index][5]<<24);
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter2, TempVal);
//Write 0xa28 0xa29
TempVal = 0;
TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] +
(CCKSwingTable_Ch14[priv->CCK_index][7]<<8) ;
rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING,"CCK chnl 14, reg 0x%x = 0x%x\n",
rCCK0_DebugPort, TempVal);
}
}
extern void dm_cck_txpower_adjust(
struct net_device *dev,
bool binch14
)
{ // dm_CCKTxPowerAdjust
struct r8192_priv *priv = ieee80211_priv(dev);
#ifdef RTL8190P
dm_CCKTxPowerAdjust_TSSI(dev, binch14);
#else
if(priv->bDcut == TRUE)
dm_CCKTxPowerAdjust_TSSI(dev, binch14);
else
dm_CCKTxPowerAdjust_ThermalMeter(dev, binch14);
#endif
}
#ifndef RTL8192U
static void dm_txpower_reset_recovery(
struct net_device *dev
)
{
struct r8192_priv *priv = ieee80211_priv(dev);
RT_TRACE(COMP_POWER_TRACKING, "Start Reset Recovery ==>\n");
rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbbgain_value);
RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc80 is %08x\n",priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbbgain_value);
RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in RFA_txPowerTrackingIndex is %x\n",priv->rfa_txpowertrackingindex);
RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery : RF A I/Q Amplify Gain is %ld\n",priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbb_iq_amplifygain);
RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: CCK Attenuation is %d dB\n",priv->cck_present_attentuation);
dm_cck_txpower_adjust(dev,priv->bcck_in_ch14);
rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbbgain_value);
RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc90 is %08x\n",priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbbgain_value);
RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in RFC_txPowerTrackingIndex is %x\n",priv->rfc_txpowertrackingindex);
RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery : RF C I/Q Amplify Gain is %ld\n",priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbb_iq_amplifygain);
} // dm_TXPowerResetRecovery
extern void dm_restore_dynamic_mechanism_state(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 reg_ratr = priv->rate_adaptive.last_ratr;
if(!priv->up)
{
RT_TRACE(COMP_RATE, "<---- dm_restore_dynamic_mechanism_state(): driver is going to unload\n");
return;
}
//
// Restore previous state for rate adaptive
//
if(priv->rate_adaptive.rate_adaptive_disabled)
return;
// TODO: Only 11n mode is implemented currently,
if( !(priv->ieee80211->mode==WIRELESS_MODE_N_24G ||
priv->ieee80211->mode==WIRELESS_MODE_N_5G))
return;
{
/* 2007/11/15 MH Copy from 8190PCI. */
u32 ratr_value;
ratr_value = reg_ratr;
if(priv->rf_type == RF_1T2R) // 1T2R, Spatial Stream 2 should be disabled
{
ratr_value &=~ (RATE_ALL_OFDM_2SS);
//DbgPrint("HW_VAR_TATR_0 from 0x%x ==> 0x%x\n", ((pu4Byte)(val))[0], ratr_value);
}
//DbgPrint("set HW_VAR_TATR_0 = 0x%x\n", ratr_value);
//cosa PlatformEFIOWrite4Byte(Adapter, RATR0, ((pu4Byte)(val))[0]);
write_nic_dword(dev, RATR0, ratr_value);
write_nic_byte(dev, UFWP, 1);
}
//Restore TX power tracking index
if(priv->btxpower_trackingInit && priv->btxpower_tracking){
dm_txpower_reset_recovery(dev);
}
//
//Restore BB Initial Gain
//
dm_bb_initialgain_restore(dev);
} // DM_RestoreDynamicMechanismState
static void dm_bb_initialgain_restore(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 bit_mask = 0x7f; //Bit0~ Bit6
if(dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
return;
//Disable Initial Gain
//PHY_SetBBReg(Adapter, UFWP, bMaskLWord, 0x800);
rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); // Only clear byte 1 and rewrite.
rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, bit_mask, (u32)priv->initgain_backup.xaagccore1);
rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, bit_mask, (u32)priv->initgain_backup.xbagccore1);
rtl8192_setBBreg(dev, rOFDM0_XCAGCCore1, bit_mask, (u32)priv->initgain_backup.xcagccore1);
rtl8192_setBBreg(dev, rOFDM0_XDAGCCore1, bit_mask, (u32)priv->initgain_backup.xdagccore1);
bit_mask = bMaskByte2;
rtl8192_setBBreg(dev, rCCK0_CCA, bit_mask, (u32)priv->initgain_backup.cca);
RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc50 is %x\n",priv->initgain_backup.xaagccore1);
RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc58 is %x\n",priv->initgain_backup.xbagccore1);
RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc60 is %x\n",priv->initgain_backup.xcagccore1);
RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc68 is %x\n",priv->initgain_backup.xdagccore1);
RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xa0a is %x\n",priv->initgain_backup.cca);
//Enable Initial Gain
//PHY_SetBBReg(Adapter, UFWP, bMaskLWord, 0x100);
rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); // Only clear byte 1 and rewrite.
} // dm_BBInitialGainRestore
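/*
 * The UFWP byte-1 writes that bracket the restore above (0x8 before, 0x1
 * after) disable and re-enable the firmware-driven initial gain, presumably
 * so the firmware does not overwrite the AGC core and CCA values while the
 * driver rewrites them; dm_bb_initialgain_backup() below applies the same
 * disable before sampling the registers.
 */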
extern void dm_backup_dynamic_mechanism_state(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
// Fsync to avoid reset
priv->bswitch_fsync = false;
priv->bfsync_processing = false;
//Backup BB InitialGain
dm_bb_initialgain_backup(dev);
} // DM_BackupDynamicMechanismState
static void dm_bb_initialgain_backup(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 bit_mask = bMaskByte0; //Bit0~ Bit6
if(dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
return;
//PHY_SetBBReg(Adapter, UFWP, bMaskLWord, 0x800);
rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); // Only clear byte 1 and rewrite.
priv->initgain_backup.xaagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XAAGCCore1, bit_mask);
priv->initgain_backup.xbagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XBAGCCore1, bit_mask);
priv->initgain_backup.xcagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XCAGCCore1, bit_mask);
priv->initgain_backup.xdagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XDAGCCore1, bit_mask);
bit_mask = bMaskByte2;
priv->initgain_backup.cca = (u8)rtl8192_QueryBBReg(dev, rCCK0_CCA, bit_mask);
RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc50 is %x\n",priv->initgain_backup.xaagccore1);
RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc58 is %x\n",priv->initgain_backup.xbagccore1);
RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc60 is %x\n",priv->initgain_backup.xcagccore1);
RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc68 is %x\n",priv->initgain_backup.xdagccore1);
RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xa0a is %x\n",priv->initgain_backup.cca);
} // dm_BBInitialGainBackup
#endif
/*-----------------------------------------------------------------------------
* Function: dm_change_dynamic_initgain_thresh()
*
* Overview:
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 05/29/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
extern void dm_change_dynamic_initgain_thresh(struct net_device *dev,
u32 dm_type,
u32 dm_value)
{
if (dm_type == DIG_TYPE_THRESH_HIGH)
{
dm_digtable.rssi_high_thresh = dm_value;
}
else if (dm_type == DIG_TYPE_THRESH_LOW)
{
dm_digtable.rssi_low_thresh = dm_value;
}
else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_HIGH)
{
dm_digtable.rssi_high_power_highthresh = dm_value;
}
else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_LOW)
{
dm_digtable.rssi_high_power_lowthresh = dm_value;
}
else if (dm_type == DIG_TYPE_ENABLE)
{
dm_digtable.dig_state = DM_STA_DIG_MAX;
dm_digtable.dig_enable_flag = true;
}
else if (dm_type == DIG_TYPE_DISABLE)
{
dm_digtable.dig_state = DM_STA_DIG_MAX;
dm_digtable.dig_enable_flag = false;
}
else if (dm_type == DIG_TYPE_DBG_MODE)
{
if(dm_value >= DM_DBG_MAX)
dm_value = DM_DBG_OFF;
dm_digtable.dbg_mode = (u8)dm_value;
}
else if (dm_type == DIG_TYPE_RSSI)
{
if(dm_value > 100)
dm_value = 30;
dm_digtable.rssi_val = (long)dm_value;
}
else if (dm_type == DIG_TYPE_ALGORITHM)
{
if (dm_value >= DIG_ALGO_MAX)
dm_value = DIG_ALGO_BY_FALSE_ALARM;
if(dm_digtable.dig_algorithm != (u8)dm_value)
dm_digtable.dig_algorithm_switch = 1;
dm_digtable.dig_algorithm = (u8)dm_value;
}
else if (dm_type == DIG_TYPE_BACKOFF)
{
if(dm_value > 30)
dm_value = 30;
dm_digtable.backoff_val = (u8)dm_value;
}
else if(dm_type == DIG_TYPE_RX_GAIN_MIN)
{
if(dm_value == 0)
dm_value = 0x1;
dm_digtable.rx_gain_range_min = (u8)dm_value;
}
else if(dm_type == DIG_TYPE_RX_GAIN_MAX)
{
if(dm_value > 0x50)
dm_value = 0x50;
dm_digtable.rx_gain_range_max = (u8)dm_value;
}
} /* DM_ChangeDynamicInitGainThresh */
extern void
dm_change_fsync_setting(
struct net_device *dev,
s32 DM_Type,
s32 DM_Value)
{
struct r8192_priv *priv = ieee80211_priv(dev);
if (DM_Type == 0) // monitor 0xc38 register
{
if(DM_Value > 1)
DM_Value = 1;
priv->framesyncMonitor = (u8)DM_Value;
//DbgPrint("pHalData->framesyncMonitor = %d", pHalData->framesyncMonitor);
}
}
extern void
dm_change_rxpath_selection_setting(
struct net_device *dev,
s32 DM_Type,
s32 DM_Value)
{
struct r8192_priv *priv = ieee80211_priv(dev);
prate_adaptive pRA = (prate_adaptive)&(priv->rate_adaptive);
if(DM_Type == 0)
{
if(DM_Value > 1)
DM_Value = 1;
DM_RxPathSelTable.Enable = (u8)DM_Value;
}
else if(DM_Type == 1)
{
if(DM_Value > 1)
DM_Value = 1;
DM_RxPathSelTable.DbgMode = (u8)DM_Value;
}
else if(DM_Type == 2)
{
if(DM_Value > 40)
DM_Value = 40;
DM_RxPathSelTable.SS_TH_low = (u8)DM_Value;
}
else if(DM_Type == 3)
{
if(DM_Value > 25)
DM_Value = 25;
DM_RxPathSelTable.diff_TH = (u8)DM_Value;
}
else if(DM_Type == 4)
{
if(DM_Value >= CCK_Rx_Version_MAX)
DM_Value = CCK_Rx_Version_1;
DM_RxPathSelTable.cck_method= (u8)DM_Value;
}
else if(DM_Type == 10)
{
if(DM_Value > 100)
DM_Value = 50;
DM_RxPathSelTable.rf_rssi[0] = (u8)DM_Value;
}
else if(DM_Type == 11)
{
if(DM_Value > 100)
DM_Value = 50;
DM_RxPathSelTable.rf_rssi[1] = (u8)DM_Value;
}
else if(DM_Type == 12)
{
if(DM_Value > 100)
DM_Value = 50;
DM_RxPathSelTable.rf_rssi[2] = (u8)DM_Value;
}
else if(DM_Type == 13)
{
if(DM_Value > 100)
DM_Value = 50;
DM_RxPathSelTable.rf_rssi[3] = (u8)DM_Value;
}
else if(DM_Type == 20)
{
if(DM_Value > 1)
DM_Value = 1;
pRA->ping_rssi_enable = (u8)DM_Value;
}
else if(DM_Type == 21)
{
if(DM_Value > 30)
DM_Value = 30;
pRA->ping_rssi_thresh_for_ra = DM_Value;
}
}
/*-----------------------------------------------------------------------------
* Function: dm_dig_init()
*
* Overview: Set DIG scheme init value.
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 05/15/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
static void dm_dig_init(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
/* 2007/10/05 MH Disable DIG scheme now. Not tested. */
dm_digtable.dig_enable_flag = true;
dm_digtable.dig_algorithm = DIG_ALGO_BY_RSSI;
dm_digtable.dbg_mode = DM_DBG_OFF; //off=by real rssi value, on=by DM_DigTable.Rssi_val for new dig
dm_digtable.dig_algorithm_switch = 0;
/* 2007/10/04 MH Define init gain threshold. */
dm_digtable.dig_state = DM_STA_DIG_MAX;
dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX;
dm_digtable.initialgain_lowerbound_state = false;
dm_digtable.rssi_low_thresh = DM_DIG_THRESH_LOW;
dm_digtable.rssi_high_thresh = DM_DIG_THRESH_HIGH;
dm_digtable.rssi_high_power_lowthresh = DM_DIG_HIGH_PWR_THRESH_LOW;
dm_digtable.rssi_high_power_highthresh = DM_DIG_HIGH_PWR_THRESH_HIGH;
dm_digtable.rssi_val = 50; //for new dig debug rssi value
dm_digtable.backoff_val = DM_DIG_BACKOFF;
dm_digtable.rx_gain_range_max = DM_DIG_MAX;
if(priv->CustomerID == RT_CID_819x_Netcore)
dm_digtable.rx_gain_range_min = DM_DIG_MIN_Netcore;
else
dm_digtable.rx_gain_range_min = DM_DIG_MIN;
} /* dm_dig_init */
/*-----------------------------------------------------------------------------
* Function: dm_ctrl_initgain_byrssi()
*
* Overview: Driver must monitor RSSI and notify firmware to change initial
* gain according to different threshold. BB team provide the
* suggested solution.
*
* Input: struct net_device *dev
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 05/27/2008 amy Create Version 0 porting from windows code.
*---------------------------------------------------------------------------*/
static void dm_ctrl_initgain_byrssi(struct net_device *dev)
{
if (dm_digtable.dig_enable_flag == false)
return;
if(dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
dm_ctrl_initgain_byrssi_by_fwfalse_alarm(dev);
else if(dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
dm_ctrl_initgain_byrssi_by_driverrssi(dev);
else
return;
}
static void dm_ctrl_initgain_byrssi_by_driverrssi(
struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 i;
static u8 fw_dig=0;
if (dm_digtable.dig_enable_flag == false)
return;
//DbgPrint("Dig by Sw Rssi \n");
if(dm_digtable.dig_algorithm_switch) // if the algorithm was switched, we have to disable FW DIG.
fw_dig = 0;
if(fw_dig <= 3) // execute several times to make sure the FW Dig is disabled
{// FW DIG Off
for(i=0; i<3; i++)
rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); // Only clear byte 1 and rewrite.
fw_dig++;
dm_digtable.dig_state = DM_STA_DIG_OFF; //fw dig off.
}
if(priv->ieee80211->state == IEEE80211_LINKED)
dm_digtable.cur_connect_state = DIG_CONNECT;
else
dm_digtable.cur_connect_state = DIG_DISCONNECT;
//DbgPrint("DM_DigTable.PreConnectState = %d, DM_DigTable.CurConnectState = %d \n",
//DM_DigTable.PreConnectState, DM_DigTable.CurConnectState);
if(dm_digtable.dbg_mode == DM_DBG_OFF)
dm_digtable.rssi_val = priv->undecorated_smoothed_pwdb;
//DbgPrint("DM_DigTable.Rssi_val = %d \n", DM_DigTable.Rssi_val);
dm_initial_gain(dev);
dm_pd_th(dev);
dm_cs_ratio(dev);
if(dm_digtable.dig_algorithm_switch)
dm_digtable.dig_algorithm_switch = 0;
dm_digtable.pre_connect_state = dm_digtable.cur_connect_state;
} /* dm_CtrlInitGainByRssi */
static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(
struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
static u32 reset_cnt = 0;
u8 i;
if (dm_digtable.dig_enable_flag == false)
return;
if(dm_digtable.dig_algorithm_switch)
{
dm_digtable.dig_state = DM_STA_DIG_MAX;
// Fw DIG On.
for(i=0; i<3; i++)
rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); // Only clear byte 1 and rewrite.
dm_digtable.dig_algorithm_switch = 0;
}
if (priv->ieee80211->state != IEEE80211_LINKED)
return;
// For smooth transitions, we cannot change the DIG state inside this range.
if ((priv->undecorated_smoothed_pwdb > dm_digtable.rssi_low_thresh) &&
(priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_thresh))
{
return;
}
//DbgPrint("Dig by Fw False Alarm\n");
//if (DM_DigTable.Dig_State == DM_STA_DIG_OFF)
/*DbgPrint("DIG Check\n\r RSSI=%d LOW=%d HIGH=%d STATE=%d",
pHalData->UndecoratedSmoothedPWDB, DM_DigTable.RssiLowThresh,
DM_DigTable.RssiHighThresh, DM_DigTable.Dig_State);*/
/* 1. When RSSI decreases, we have to check whether it is smaller than the
threshold and then execute the steps below. */
if ((priv->undecorated_smoothed_pwdb <= dm_digtable.rssi_low_thresh))
{
/* 2008/02/05 MH When we execute silent reset, the DIG PHY parameters
will be reset to init value. We must prevent the condition. */
if (dm_digtable.dig_state == DM_STA_DIG_OFF &&
(priv->reset_count == reset_cnt))
{
return;
}
else
{
reset_cnt = priv->reset_count;
}
// If DIG is off, DIG high power state must reset.
dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX;
dm_digtable.dig_state = DM_STA_DIG_OFF;
// 1.1 DIG Off.
rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); // Only clear byte 1 and rewrite.
// 1.2 Set initial gain.
write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x17);
write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x17);
write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x17);
write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x17);
// 1.3 Lower PD_TH for OFDM.
if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
{
/* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
// 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
#ifdef RTL8190P
write_nic_byte(dev, rOFDM0_RxDetector1, 0x40);
#else
write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00);
#endif
/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(pAdapter, rOFDM0_RxDetector1, 0x40);
*/
//else if (pAdapter->HardwareType == HARDWARE_TYPE_RTL8192E)
//else
//PlatformEFIOWrite1Byte(pAdapter, rOFDM0_RxDetector1, 0x40);
}
else
write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
// 1.4 Lower CS ratio for CCK.
write_nic_byte(dev, 0xa0a, 0x08);
// 1.5 Higher EDCCA.
//PlatformEFIOWrite4Byte(pAdapter, rOFDM0_ECCAThreshold, 0x325);
return;
}
/* 2. When RSSI increases, we have to check whether it is larger than the
threshold and then execute the steps below. */
if ((priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh) )
{
u8 reset_flag = 0;
if (dm_digtable.dig_state == DM_STA_DIG_ON &&
(priv->reset_count == reset_cnt))
{
dm_ctrl_initgain_byrssi_highpwr(dev);
return;
}
else
{
if (priv->reset_count != reset_cnt)
reset_flag = 1;
reset_cnt = priv->reset_count;
}
dm_digtable.dig_state = DM_STA_DIG_ON;
//DbgPrint("DIG ON\n\r");
// 2.1 Set initial gain.
// 2008/02/26 MH SD3-Jerry suggest to prevent dirty environment.
if (reset_flag == 1)
{
write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x2c);
write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x2c);
write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x2c);
write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x2c);
}
else
{
write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x20);
write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x20);
write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x20);
write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x20);
}
// 2.2 Higher PD_TH for OFDM.
if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
{
/* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
// 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
#ifdef RTL8190P
write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
#else
write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
#endif
/*
else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
*/
//else if (pAdapter->HardwareType == HARDWARE_TYPE_RTL8192E)
//else
//PlatformEFIOWrite1Byte(pAdapter, rOFDM0_RxDetector1, 0x42);
}
else
write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
// 2.3 Higher CS ratio for CCK.
write_nic_byte(dev, 0xa0a, 0xcd);
// 2.4 Lower EDCCA.
/* 2008/01/11 MH 90/92 series are the same. */
//PlatformEFIOWrite4Byte(pAdapter, rOFDM0_ECCAThreshold, 0x346);
// 2.5 DIG On.
rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); // Only clear byte 1 and rewrite.
}
dm_ctrl_initgain_byrssi_highpwr(dev);
} /* dm_CtrlInitGainByRssi */
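/*
 * Summary of the hysteresis above: below rssi_low_thresh the FW DIG is
 * turned off, the initial gain is pinned to 0x17, and the OFDM PD_TH and
 * CCK CS ratio are lowered; at or above rssi_high_thresh the FW DIG is
 * turned back on with a higher gain (0x2c right after a silent reset, 0x20
 * otherwise), a higher PD_TH and a higher CS ratio. Between the two
 * thresholds nothing changes, so the state cannot oscillate on small RSSI
 * fluctuations.
 */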
/*-----------------------------------------------------------------------------
* Function: dm_ctrl_initgain_byrssi_highpwr()
*
* Overview:
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 05/28/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
static void dm_ctrl_initgain_byrssi_highpwr(
struct net_device * dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
static u32 reset_cnt_highpwr = 0;
// For smooth transitions, we cannot change the high power DIG state inside this range.
if ((priv->undecorated_smoothed_pwdb > dm_digtable.rssi_high_power_lowthresh) &&
(priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_power_highthresh))
{
return;
}
/* 3. When RSSI is >75% or <70%, it is a high power issue. We have to check
whether it crosses the threshold and then execute the steps below. */
// 2008/02/05 MH SD3-Jerry Modify PD_TH for high power issue.
if (priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_power_highthresh)
{
if (dm_digtable.dig_highpwr_state == DM_STA_DIG_ON &&
(priv->reset_count == reset_cnt_highpwr))
return;
else
dm_digtable.dig_highpwr_state = DM_STA_DIG_ON;
// 3.1 Higher PD_TH for OFDM for high power state.
if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
{
#ifdef RTL8190P
write_nic_byte(dev, rOFDM0_RxDetector1, 0x41);
#else
write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10);
#endif
/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(dev, rOFDM0_RxDetector1, 0x41);
*/
}
else
write_nic_byte(dev, rOFDM0_RxDetector1, 0x43);
}
else
{
if (dm_digtable.dig_highpwr_state == DM_STA_DIG_OFF&&
(priv->reset_count == reset_cnt_highpwr))
return;
else
dm_digtable.dig_highpwr_state = DM_STA_DIG_OFF;
if (priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_power_lowthresh &&
priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh)
{
// 3.2 Recover PD_TH for OFDM for normal power region.
if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
{
#ifdef RTL8190P
write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
#else
write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
#endif
/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
*/
}
else
write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
}
}
reset_cnt_highpwr = priv->reset_count;
} /* dm_CtrlInitGainByRssiHighPwr */
static void dm_initial_gain(
struct net_device * dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 initial_gain=0;
static u8 initialized=0, force_write=0;
static u32 reset_cnt=0;
if(dm_digtable.dig_algorithm_switch)
{
initialized = 0;
reset_cnt = 0;
}
if(dm_digtable.pre_connect_state == dm_digtable.cur_connect_state)
{
if(dm_digtable.cur_connect_state == DIG_CONNECT)
{
if((dm_digtable.rssi_val+10-dm_digtable.backoff_val) > dm_digtable.rx_gain_range_max)
dm_digtable.cur_ig_value = dm_digtable.rx_gain_range_max;
else if((dm_digtable.rssi_val+10-dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
dm_digtable.cur_ig_value = dm_digtable.rx_gain_range_min;
else
dm_digtable.cur_ig_value = dm_digtable.rssi_val+10-dm_digtable.backoff_val;
}
else //current state is disconnected
{
if(dm_digtable.cur_ig_value == 0)
dm_digtable.cur_ig_value = priv->DefaultInitialGain[0];
else
dm_digtable.cur_ig_value = dm_digtable.pre_ig_value;
}
}
else // disconnected -> connected or connected -> disconnected
{
dm_digtable.cur_ig_value = priv->DefaultInitialGain[0];
dm_digtable.pre_ig_value = 0;
}
//DbgPrint("DM_DigTable.CurIGValue = 0x%x, DM_DigTable.PreIGValue = 0x%x\n", DM_DigTable.CurIGValue, DM_DigTable.PreIGValue);
// if silent reset happened, we should rewrite the values back
if(priv->reset_count != reset_cnt)
{
force_write = 1;
reset_cnt = priv->reset_count;
}
if(dm_digtable.pre_ig_value != read_nic_byte(dev, rOFDM0_XAAGCCore1))
force_write = 1;
{
if((dm_digtable.pre_ig_value != dm_digtable.cur_ig_value)
|| !initialized || force_write)
{
initial_gain = (u8)dm_digtable.cur_ig_value;
//DbgPrint("Write initial gain = 0x%x\n", initial_gain);
// Set initial gain.
write_nic_byte(dev, rOFDM0_XAAGCCore1, initial_gain);
write_nic_byte(dev, rOFDM0_XBAGCCore1, initial_gain);
write_nic_byte(dev, rOFDM0_XCAGCCore1, initial_gain);
write_nic_byte(dev, rOFDM0_XDAGCCore1, initial_gain);
dm_digtable.pre_ig_value = dm_digtable.cur_ig_value;
initialized = 1;
force_write = 0;
}
}
}
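/*
 * In the connected branch above the target gain is effectively
 * clamp(rssi_val + 10 - backoff_val, rx_gain_range_min, rx_gain_range_max).
 * For example, with a (hypothetical) rssi_val of 50 and backoff_val of 12,
 * the raw target is 48, which is then clamped into the configured range
 * before being written to the four per-path AGC core registers.
 */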
static void dm_pd_th(
struct net_device * dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
static u8 initialized=0, force_write=0;
static u32 reset_cnt = 0;
if(dm_digtable.dig_algorithm_switch)
{
initialized = 0;
reset_cnt = 0;
}
if(dm_digtable.pre_connect_state == dm_digtable.cur_connect_state)
{
if(dm_digtable.cur_connect_state == DIG_CONNECT)
{
if (dm_digtable.rssi_val >= dm_digtable.rssi_high_power_highthresh)
dm_digtable.curpd_thstate = DIG_PD_AT_HIGH_POWER;
else if ((dm_digtable.rssi_val <= dm_digtable.rssi_low_thresh))
dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
else if ((dm_digtable.rssi_val >= dm_digtable.rssi_high_thresh) &&
(dm_digtable.rssi_val < dm_digtable.rssi_high_power_lowthresh))
dm_digtable.curpd_thstate = DIG_PD_AT_NORMAL_POWER;
else
dm_digtable.curpd_thstate = dm_digtable.prepd_thstate;
}
else
{
dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
}
}
else // disconnected -> connected or connected -> disconnected
{
dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
}
// if silent reset happened, we should rewrite the values back
if(priv->reset_count != reset_cnt)
{
force_write = 1;
reset_cnt = priv->reset_count;
}
{
if((dm_digtable.prepd_thstate != dm_digtable.curpd_thstate) ||
(initialized<=3) || force_write)
{
//DbgPrint("Write PD_TH state = %d\n", DM_DigTable.CurPD_THState);
if(dm_digtable.curpd_thstate == DIG_PD_AT_LOW_POWER)
{
// Lower PD_TH for OFDM.
if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
{
/* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
// 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
#ifdef RTL8190P
write_nic_byte(dev, rOFDM0_RxDetector1, 0x40);
#else
write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00);
#endif
/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(dev, rOFDM0_RxDetector1, 0x40);
*/
}
else
write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
}
else if(dm_digtable.curpd_thstate == DIG_PD_AT_NORMAL_POWER)
{
// Higher PD_TH for OFDM.
if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
{
/* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
// 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
#ifdef RTL8190P
write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
#else
write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
#endif
/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
*/
}
else
write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
}
else if(dm_digtable.curpd_thstate == DIG_PD_AT_HIGH_POWER)
{
// Higher PD_TH for OFDM for high power state.
if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
{
#ifdef RTL8190P
write_nic_byte(dev, rOFDM0_RxDetector1, 0x41);
#else
write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10);
#endif
/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
write_nic_byte(dev, rOFDM0_RxDetector1, 0x41);
*/
}
else
write_nic_byte(dev, rOFDM0_RxDetector1, 0x43);
}
dm_digtable.prepd_thstate = dm_digtable.curpd_thstate;
if(initialized <= 3)
initialized++;
force_write = 0;
}
}
}
static void dm_cs_ratio(
struct net_device * dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
static u8 initialized=0,force_write=0;
static u32 reset_cnt = 0;
if(dm_digtable.dig_algorithm_switch)
{
initialized = 0;
reset_cnt = 0;
}
if(dm_digtable.pre_connect_state == dm_digtable.cur_connect_state)
{
if(dm_digtable.cur_connect_state == DIG_CONNECT)
{
if ((dm_digtable.rssi_val <= dm_digtable.rssi_low_thresh))
dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
else if ((dm_digtable.rssi_val >= dm_digtable.rssi_high_thresh) )
dm_digtable.curcs_ratio_state = DIG_CS_RATIO_HIGHER;
else
dm_digtable.curcs_ratio_state = dm_digtable.precs_ratio_state;
}
else
{
dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
}
}
else // disconnected -> connected or connected -> disconnected
{
dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
}
// if silent reset happened, we should rewrite the values back
if(priv->reset_count != reset_cnt)
{
force_write = 1;
reset_cnt = priv->reset_count;
}
{
if((dm_digtable.precs_ratio_state != dm_digtable.curcs_ratio_state) ||
!initialized || force_write)
{
//DbgPrint("Write CS_ratio state = %d\n", DM_DigTable.CurCS_ratioState);
if(dm_digtable.curcs_ratio_state == DIG_CS_RATIO_LOWER)
{
// Lower CS ratio for CCK.
write_nic_byte(dev, 0xa0a, 0x08);
}
else if(dm_digtable.curcs_ratio_state == DIG_CS_RATIO_HIGHER)
{
// Higher CS ratio for CCK.
write_nic_byte(dev, 0xa0a, 0xcd);
}
dm_digtable.precs_ratio_state = dm_digtable.curcs_ratio_state;
initialized = 1;
force_write = 0;
}
}
}
extern void dm_init_edca_turbo(struct net_device * dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
priv->bcurrent_turbo_EDCA = false;
priv->ieee80211->bis_any_nonbepkts = false;
priv->bis_cur_rdlstate = false;
} // dm_init_edca_turbo
static void dm_check_edca_turbo(
struct net_device * dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
//PSTA_QOS pStaQos = pMgntInfo->pStaQos;
// Keep past Tx/Rx packet count for RT-to-RT EDCA turbo.
static unsigned long lastTxOkCnt = 0;
static unsigned long lastRxOkCnt = 0;
unsigned long curTxOkCnt = 0;
unsigned long curRxOkCnt = 0;
//
// Do not enable turbo mode under WiFi config with QoS enabled, because the EDCA
// parameters should follow the settings from the QAP. By Bruce, 2007-12-07.
//
if(priv->ieee80211->state != IEEE80211_LINKED)
goto dm_CheckEdcaTurbo_EXIT;
// We do not turn on EDCA turbo mode for APs that have IOT issues
if(priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_DISABLE_EDCA_TURBO)
goto dm_CheckEdcaTurbo_EXIT;
// printk("========>%s():bis_any_nonbepkts is %d\n",__FUNCTION__,priv->bis_any_nonbepkts);
// Check the status for current condition.
if(!priv->ieee80211->bis_any_nonbepkts)
{
curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
// For an RT-AP, we need to turn it on when Rx > Tx
if(curRxOkCnt > 4*curTxOkCnt)
{
//printk("%s():curRxOkCnt > 4*curTxOkCnt\n");
if(!priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA)
{
write_nic_dword(dev, EDCAPARA_BE, edca_setting_DL[pHTInfo->IOTPeer]);
priv->bis_cur_rdlstate = true;
}
}
else
{
//printk("%s():curRxOkCnt < 4*curTxOkCnt\n");
if(priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA)
{
write_nic_dword(dev, EDCAPARA_BE, edca_setting_UL[pHTInfo->IOTPeer]);
priv->bis_cur_rdlstate = false;
}
}
priv->bcurrent_turbo_EDCA = true;
}
else
{
//
// Turn Off EDCA turbo here.
// Restore original EDCA according to the declaration of AP.
//
if(priv->bcurrent_turbo_EDCA)
{
{
u8 u1bAIFS;
u32 u4bAcParam;
struct ieee80211_qos_parameters *qos_parameters = &priv->ieee80211->current_network.qos_data.parameters;
u8 mode = priv->ieee80211->mode;
// Each time the EDCA parameters are updated, reset the EDCA turbo mode status.
dm_init_edca_turbo(dev);
u1bAIFS = qos_parameters->aifs[0] * ((mode&(IEEE_G|IEEE_N_24G)) ?9:20) + aSifsTime;
u4bAcParam = ((((u32)(qos_parameters->tx_op_limit[0]))<< AC_PARAM_TXOP_LIMIT_OFFSET)|
(((u32)(qos_parameters->cw_max[0]))<< AC_PARAM_ECW_MAX_OFFSET)|
(((u32)(qos_parameters->cw_min[0]))<< AC_PARAM_ECW_MIN_OFFSET)|
((u32)u1bAIFS << AC_PARAM_AIFS_OFFSET));
//write_nic_dword(dev, WDCAPARA_ADD[i], u4bAcParam);
write_nic_dword(dev, EDCAPARA_BE, u4bAcParam);
// Check the ACM bit.
// If it is set, immediately set the ACM control bit to downgrade the AC, to pass the WMM test plan. Annie, 2005-12-13.
{
// TODO: Modified this part and try to set acm control in only 1 IO processing!!
PACI_AIFSN pAciAifsn = (PACI_AIFSN)&(qos_parameters->aifs[0]);
u8 AcmCtrl = read_nic_byte( dev, AcmHwCtrl );
if( pAciAifsn->f.ACM )
{ // ACM bit is 1.
AcmCtrl |= AcmHw_BeqEn;
}
else
{ // ACM bit is 0.
AcmCtrl &= (~AcmHw_BeqEn);
}
RT_TRACE( COMP_QOS,"SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", AcmCtrl ) ;
write_nic_byte(dev, AcmHwCtrl, AcmCtrl );
}
}
priv->bcurrent_turbo_EDCA = false;
}
}
dm_CheckEdcaTurbo_EXIT:
// Set variables for next time.
priv->ieee80211->bis_any_nonbepkts = false;
lastTxOkCnt = priv->stats.txbytesunicast;
lastRxOkCnt = priv->stats.rxbytesunicast;
} // dm_CheckEdcaTurbo
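/*
 * The turbo heuristic above compares unicast byte deltas since the last
 * call: when curRxOkCnt > 4 * curTxOkCnt the link is treated as downlink
 * and the DL EDCA parameter set is programmed, otherwise the UL set. The
 * restore path recomputes the BE AC parameter from the AP's declared QoS
 * values; with a (hypothetical) aifs[0] of 2 in G/N mode, u1bAIFS becomes
 * 2 * 9 + aSifsTime, and is packed into u4bAcParam together with the TXOP
 * limit and the ECW min/max fields.
 */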
extern void DM_CTSToSelfSetting(struct net_device * dev,u32 DM_Type, u32 DM_Value)
{
struct r8192_priv *priv = ieee80211_priv(dev);
if (DM_Type == 0) // CTS to self disable/enable
{
if(DM_Value > 1)
DM_Value = 1;
priv->ieee80211->bCTSToSelfEnable = (bool)DM_Value;
//DbgPrint("pMgntInfo->bCTSToSelfEnable = %d\n", pMgntInfo->bCTSToSelfEnable);
}
else if(DM_Type == 1) //CTS to self Th
{
if(DM_Value >= 50)
DM_Value = 50;
priv->ieee80211->CTSToSelfTH = (u8)DM_Value;
//DbgPrint("pMgntInfo->CTSToSelfTH = %d\n", pMgntInfo->CTSToSelfTH);
}
}
static void dm_init_ctstoself(struct net_device * dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
priv->ieee80211->bCTSToSelfEnable = TRUE;
priv->ieee80211->CTSToSelfTH = CTSToSelfTHVal;
}
static void dm_ctstoself(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
static unsigned long lastTxOkCnt = 0;
static unsigned long lastRxOkCnt = 0;
unsigned long curTxOkCnt = 0;
unsigned long curRxOkCnt = 0;
if(priv->ieee80211->bCTSToSelfEnable != TRUE)
{
pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF;
return;
}
/*
1. Uplink
2. Linksys350/Linksys300N
3. <50 disable, >55 enable
*/
if(pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM)
{
curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
if(curRxOkCnt > 4*curTxOkCnt) //downlink, disable CTS to self
{
pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF;
//DbgPrint("dm_CTSToSelf() ==> CTS to self disabled -- downlink\n");
}
else //uplink
{
pHTInfo->IOTAction |= HT_IOT_ACT_FORCED_CTS2SELF;
}
lastTxOkCnt = priv->stats.txbytesunicast;
lastRxOkCnt = priv->stats.rxbytesunicast;
}
}
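/*
 * The CTS-to-self heuristic above applies only to Broadcom IOT peers and
 * uses the same Rx > 4 * Tx byte-delta test as the EDCA turbo code: on
 * downlink-dominated traffic the forced CTS-to-self action is cleared, on
 * uplink-dominated traffic it is set.
 */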
/*-----------------------------------------------------------------------------
* Function: dm_check_rfctrl_gpio()
*
* Overview: Copy 8187B template for 9xseries.
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 05/28/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
static void dm_check_rfctrl_gpio(struct net_device * dev)
{
#ifdef RTL8192E
struct r8192_priv *priv = ieee80211_priv(dev);
#endif
// Workaround for the DTM test: we do not enable HW radio on/off, because
// reading/writing page 1 registers before the Lextra bus is enabled makes the
// system fail when resuming from S4. 20080218, Emily
// Stop executing the workitem to prevent the S3/S4 bug.
#ifdef RTL8190P
return;
#endif
#ifdef RTL8192U
return;
#endif
#ifdef RTL8192E
queue_delayed_work(priv->priv_wq,&priv->gpio_change_rf_wq,0);
#endif
} /* dm_CheckRfCtrlGPIO */
/*-----------------------------------------------------------------------------
* Function: dm_check_pbc_gpio()
*
* Overview: Check if PBC button is pressed.
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 05/28/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
static void dm_check_pbc_gpio(struct net_device *dev)
{
#ifdef RTL8192U
struct r8192_priv *priv = ieee80211_priv(dev);
u8 tmp1byte;
tmp1byte = read_nic_byte(dev,GPI);
if(tmp1byte == 0xff)
return;
if (tmp1byte&BIT6 || tmp1byte&BIT0)
{
// Here we only set bPbcPressed to TRUE
// After trigger PBC, the variable will be set to FALSE
RT_TRACE(COMP_IO, "CheckPbcGPIO - PBC is pressed\n");
priv->bpbc_pressed = true;
}
#endif
}
#ifdef RTL8192E
/*-----------------------------------------------------------------------------
* Function: dm_GPIOChangeRF
* Overview: Workitem callback for HW radio on-off control via GPIO.
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 02/21/2008 MHC Create Version 0.
*
*---------------------------------------------------------------------------*/
extern void dm_gpio_change_rf_callback(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work,struct delayed_work,work);
struct r8192_priv *priv = container_of(dwork,struct r8192_priv,gpio_change_rf_wq);
struct net_device *dev = priv->ieee80211->dev;
u8 tmp1byte;
RT_RF_POWER_STATE eRfPowerStateToSet;
bool bActuallySet = false;
do{
bActuallySet=false;
if(!priv->up)
{
RT_TRACE((COMP_INIT | COMP_POWER | COMP_RF),"dm_gpio_change_rf_callback(): Callback function breaks out!!\n");
break;
}
else
{
// GPIO input register 0x108 (GPI) is read-only;
// bit 1 = 1: RF on; 0: RF off.
tmp1byte = read_nic_byte(dev,GPI);
eRfPowerStateToSet = (tmp1byte&BIT1) ? eRfOn : eRfOff;
if( (priv->bHwRadioOff == true) && (eRfPowerStateToSet == eRfOn))
{
RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio ON\n");
priv->bHwRadioOff = false;
bActuallySet = true;
}
else if ( (priv->bHwRadioOff == false) && (eRfPowerStateToSet == eRfOff))
{
RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio OFF\n");
priv->bHwRadioOff = true;
bActuallySet = true;
}
if(bActuallySet)
{
#ifdef TO_DO
MgntActSet_RF_State(dev, eRfPowerStateToSet, RF_CHANGE_BY_HW);
//DrvIFIndicateCurrentPhyStatus(pAdapter);
#endif
}
else
{
msleep(2000);
}
}
}while(TRUE);
} /* dm_GPIOChangeRF */
#endif
/*-----------------------------------------------------------------------------
* Function: DM_RFPathCheckWorkItemCallBack()
*
* Overview: Check if Current RF RX path is enabled
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 01/30/2008 MHC Create Version 0.
*
*---------------------------------------------------------------------------*/
extern void dm_rf_pathcheck_workitemcallback(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work,struct delayed_work,work);
struct r8192_priv *priv = container_of(dwork,struct r8192_priv,rfpath_check_wq);
struct net_device *dev =priv->ieee80211->dev;
//bool bactually_set = false;
u8 rfpath = 0, i;
/* 2008/01/30 MH After discussing with SD3 Jerry, 0xc04/0xd04 register will
always be the same. We only read 0xc04 now. */
rfpath = read_nic_byte(dev, 0xc04);
// Check Bit 0-3, it means if RF A-D is enabled.
for (i = 0; i < RF90_PATH_MAX; i++)
{
if (rfpath & (0x01<<i))
priv->brfpath_rxenable[i] = 1;
else
priv->brfpath_rxenable[i] = 0;
}
if(!DM_RxPathSelTable.Enable)
return;
dm_rxpath_sel_byrssi(dev);
} /* DM_RFPathCheckWorkItemCallBack */
static void dm_init_rxpath_selection(struct net_device * dev)
{
u8 i;
struct r8192_priv *priv = ieee80211_priv(dev);
DM_RxPathSelTable.Enable = 1; //default enabled
DM_RxPathSelTable.SS_TH_low = RxPathSelection_SS_TH_low;
DM_RxPathSelTable.diff_TH = RxPathSelection_diff_TH;
if(priv->CustomerID == RT_CID_819x_Netcore)
DM_RxPathSelTable.cck_method = CCK_Rx_Version_2;
else
DM_RxPathSelTable.cck_method = CCK_Rx_Version_1;
DM_RxPathSelTable.DbgMode = DM_DBG_OFF;
DM_RxPathSelTable.disabledRF = 0;
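/*
 * Per-path defaults set below: 50% RSSI, a CCK PWDB of -64 (later used as
 * the "no CCK sample yet" sentinel), and a re-enable RSSI threshold of 100
 * so a path is only re-enabled after it has been disabled and a real
 * threshold recorded.
 */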
for(i=0; i<4; i++)
{
DM_RxPathSelTable.rf_rssi[i] = 50;
DM_RxPathSelTable.cck_pwdb_sta[i] = -64;
DM_RxPathSelTable.rf_enable_rssi_th[i] = 100;
}
}
static void dm_rxpath_sel_byrssi(struct net_device * dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 i, max_rssi_index=0, min_rssi_index=0, sec_rssi_index=0, rf_num=0;
u8 tmp_max_rssi=0, tmp_min_rssi=0, tmp_sec_rssi=0;
u8 cck_default_Rx=0x2; //RF-C
u8 cck_optional_Rx=0x3;//RF-D
long tmp_cck_max_pwdb=0, tmp_cck_min_pwdb=0, tmp_cck_sec_pwdb=0;
u8 cck_rx_ver2_max_index=0, cck_rx_ver2_min_index=0, cck_rx_ver2_sec_index=0;
u8 cur_rf_rssi;
long cur_cck_pwdb;
static u8 disabled_rf_cnt=0, cck_Rx_Path_initialized=0;
u8 update_cck_rx_path;
if(priv->rf_type != RF_2T4R)
return;
if(!cck_Rx_Path_initialized)
{
DM_RxPathSelTable.cck_Rx_path = (read_nic_byte(dev, 0xa07)&0xf);
cck_Rx_Path_initialized = 1;
}
DM_RxPathSelTable.disabledRF = 0xf;
DM_RxPathSelTable.disabledRF &=~ (read_nic_byte(dev, 0xc04));
if(priv->ieee80211->mode == WIRELESS_MODE_B)
{
DM_RxPathSelTable.cck_method = CCK_Rx_Version_2; // pure B mode: force CCK Rx version 2
//DbgPrint("Pure B mode, use cck rx version2 \n");
}
//decide max/sec/min rssi index
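/*
 * The passes below keep running max/second/min values across the enabled
 * RF paths, with the tie-handling branches ensuring the "sec" and "min"
 * indices end up distinct from "max" where possible.
 */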
for (i=0; i<RF90_PATH_MAX; i++)
{
if(!DM_RxPathSelTable.DbgMode)
DM_RxPathSelTable.rf_rssi[i] = priv->stats.rx_rssi_percentage[i];
if(priv->brfpath_rxenable[i])
{
rf_num++;
cur_rf_rssi = DM_RxPathSelTable.rf_rssi[i];
if(rf_num == 1) // found the first enabled rf path and its rssi value
{ // initialize: set all rssi indices to the same path
max_rssi_index = min_rssi_index = sec_rssi_index = i;
tmp_max_rssi = tmp_min_rssi = tmp_sec_rssi = cur_rf_rssi;
}
else if(rf_num == 2)
{ // we pick the max index first, and let sec and min be the same one
if(cur_rf_rssi >= tmp_max_rssi)
{
tmp_max_rssi = cur_rf_rssi;
max_rssi_index = i;
}
else
{
tmp_sec_rssi = tmp_min_rssi = cur_rf_rssi;
sec_rssi_index = min_rssi_index = i;
}
}
else
{
if(cur_rf_rssi > tmp_max_rssi)
{
tmp_sec_rssi = tmp_max_rssi;
sec_rssi_index = max_rssi_index;
tmp_max_rssi = cur_rf_rssi;
max_rssi_index = i;
}
else if(cur_rf_rssi == tmp_max_rssi)
{ // let sec and min point to different indices
tmp_sec_rssi = cur_rf_rssi;
sec_rssi_index = i;
}
else if((cur_rf_rssi < tmp_max_rssi) &&(cur_rf_rssi > tmp_sec_rssi))
{
tmp_sec_rssi = cur_rf_rssi;
sec_rssi_index = i;
}
else if(cur_rf_rssi == tmp_sec_rssi)
{
if(tmp_sec_rssi == tmp_min_rssi)
{ // let sec and min point to different indices
tmp_sec_rssi = cur_rf_rssi;
sec_rssi_index = i;
}
else
{
// In this case we don't need to set any index
}
}
else if((cur_rf_rssi < tmp_sec_rssi) && (cur_rf_rssi > tmp_min_rssi))
{
// In this case we don't need to set any index
}
else if(cur_rf_rssi == tmp_min_rssi)
{
if(tmp_sec_rssi == tmp_min_rssi)
{ // let sec and min point to different indices
tmp_min_rssi = cur_rf_rssi;
min_rssi_index = i;
}
else
{
// In this case we don't need to set any index
}
}
else if(cur_rf_rssi < tmp_min_rssi)
{
tmp_min_rssi = cur_rf_rssi;
min_rssi_index = i;
}
}
}
}
rf_num = 0;
// decide max/sec/min cck pwdb index
if(DM_RxPathSelTable.cck_method == CCK_Rx_Version_2)
{
for (i=0; i<RF90_PATH_MAX; i++)
{
if(priv->brfpath_rxenable[i])
{
rf_num++;
cur_cck_pwdb = DM_RxPathSelTable.cck_pwdb_sta[i];
if(rf_num == 1) // found the first enabled rf path and its pwdb value
{ // initialize: set all pwdb indices to the same path
cck_rx_ver2_max_index = cck_rx_ver2_min_index = cck_rx_ver2_sec_index = i;
tmp_cck_max_pwdb = tmp_cck_min_pwdb = tmp_cck_sec_pwdb = cur_cck_pwdb;
}
else if(rf_num == 2)
{ // we pick the max index first, and let sec and min be the same one
if(cur_cck_pwdb >= tmp_cck_max_pwdb)
{
tmp_cck_max_pwdb = cur_cck_pwdb;
cck_rx_ver2_max_index = i;
}
else
{
tmp_cck_sec_pwdb = tmp_cck_min_pwdb = cur_cck_pwdb;
cck_rx_ver2_sec_index = cck_rx_ver2_min_index = i;
}
}
else
{
if(cur_cck_pwdb > tmp_cck_max_pwdb)
{
tmp_cck_sec_pwdb = tmp_cck_max_pwdb;
cck_rx_ver2_sec_index = cck_rx_ver2_max_index;
tmp_cck_max_pwdb = cur_cck_pwdb;
cck_rx_ver2_max_index = i;
}
else if(cur_cck_pwdb == tmp_cck_max_pwdb)
{ // let sec and min point to different indices
tmp_cck_sec_pwdb = cur_cck_pwdb;
cck_rx_ver2_sec_index = i;
}
else if((cur_cck_pwdb < tmp_cck_max_pwdb) &&(cur_cck_pwdb > tmp_cck_sec_pwdb))
{
tmp_cck_sec_pwdb = cur_cck_pwdb;
cck_rx_ver2_sec_index = i;
}
else if(cur_cck_pwdb == tmp_cck_sec_pwdb)
{
if(tmp_cck_sec_pwdb == tmp_cck_min_pwdb)
{ // let sec and min point to different indices
tmp_cck_sec_pwdb = cur_cck_pwdb;
cck_rx_ver2_sec_index = i;
}
else
{
// In this case we don't need to set any index
}
}
else if((cur_cck_pwdb < tmp_cck_sec_pwdb) && (cur_cck_pwdb > tmp_cck_min_pwdb))
{
// In this case we don't need to set any index
}
else if(cur_cck_pwdb == tmp_cck_min_pwdb)
{
if(tmp_cck_sec_pwdb == tmp_cck_min_pwdb)
{ // let sec and min point to different indices
tmp_cck_min_pwdb = cur_cck_pwdb;
cck_rx_ver2_min_index = i;
}
else
{
// In this case we don't need to set any index
}
}
else if(cur_cck_pwdb < tmp_cck_min_pwdb)
{
tmp_cck_min_pwdb = cur_cck_pwdb;
cck_rx_ver2_min_index = i;
}
}
}
}
}
// Set CCK Rx path
// reg0xA07[3:2]=cck default rx path, reg0xa07[1:0]=cck optional rx path.
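// Example: default Rx = RF-C (2) and optional Rx = RF-D (3) encode as
// (2 << 2) | 3 = 0x0b, matching the initial values above.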
update_cck_rx_path = 0;
if(DM_RxPathSelTable.cck_method == CCK_Rx_Version_2)
{
cck_default_Rx = cck_rx_ver2_max_index;
cck_optional_Rx = cck_rx_ver2_sec_index;
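// -64 is the initialization value from dm_init_rxpath_selection(),
// i.e. no CCK PWDB sample has been recorded yet.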
if(tmp_cck_max_pwdb != -64)
update_cck_rx_path = 1;
}
if(tmp_min_rssi < DM_RxPathSelTable.SS_TH_low && disabled_rf_cnt < 2)
{
if((tmp_max_rssi - tmp_min_rssi) >= DM_RxPathSelTable.diff_TH)
{
//record the rssi threshold needed to re-enable this path
DM_RxPathSelTable.rf_enable_rssi_th[min_rssi_index] = tmp_max_rssi+5;
//disable the BB Rx path, OFDM
rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0x1<<min_rssi_index, 0x0); // 0xc04[3:0]
rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x1<<min_rssi_index, 0x0); // 0xd04[3:0]
disabled_rf_cnt++;
}
if(DM_RxPathSelTable.cck_method == CCK_Rx_Version_1)
{
cck_default_Rx = max_rssi_index;
cck_optional_Rx = sec_rssi_index;
if(tmp_max_rssi)
update_cck_rx_path = 1;
}
}
if(update_cck_rx_path)
{
DM_RxPathSelTable.cck_Rx_path = (cck_default_Rx<<2)|(cck_optional_Rx);
rtl8192_setBBreg(dev, rCCK0_AFESetting, 0x0f000000, DM_RxPathSelTable.cck_Rx_path);
}
if(DM_RxPathSelTable.disabledRF)
{
for(i=0; i<4; i++)
{
if((DM_RxPathSelTable.disabledRF>>i) & 0x1) //disabled rf
{
if(tmp_max_rssi >= DM_RxPathSelTable.rf_enable_rssi_th[i])
{
//enable the BB Rx path
//DbgPrint("RF-%d is enabled. \n", 0x1<<i);
rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0x1<<i, 0x1); // 0xc04[3:0]
rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x1<<i, 0x1); // 0xd04[3:0]
DM_RxPathSelTable.rf_enable_rssi_th[i] = 100;
disabled_rf_cnt--;
}
}
}
}
}
/*-----------------------------------------------------------------------------
* Function: dm_check_rx_path_selection()
*
* Overview: Call a workitem to check current RXRF path and Rx Path selection by RSSI.
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 05/28/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
static void dm_check_rx_path_selection(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
queue_delayed_work(priv->priv_wq,&priv->rfpath_check_wq,0);
} /* dm_CheckRxRFPath */
static void dm_init_fsync (struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
priv->ieee80211->fsync_time_interval = 500;
priv->ieee80211->fsync_rate_bitmap = 0x0f000800;
priv->ieee80211->fsync_rssi_threshold = 30;
#ifdef RTL8190P
priv->ieee80211->bfsync_enable = true;
#else
priv->ieee80211->bfsync_enable = false;
#endif
priv->ieee80211->fsync_multiple_timeinterval = 3;
priv->ieee80211->fsync_firstdiff_ratethreshold= 100;
priv->ieee80211->fsync_seconddiff_ratethreshold= 200;
priv->ieee80211->fsync_state = Default_Fsync;
priv->framesyncMonitor = 1; // current default 0xc38 monitor on
init_timer(&priv->fsync_timer);
priv->fsync_timer.data = (unsigned long)dev;
priv->fsync_timer.function = dm_fsync_timer_callback;
}
static void dm_deInit_fsync(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
del_timer_sync(&priv->fsync_timer);
}
extern void dm_fsync_timer_callback(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct r8192_priv *priv = ieee80211_priv((struct net_device *)data);
u32 rate_index, rate_count = 0, rate_count_diff=0;
bool bSwitchFromCountDiff = false;
bool bDoubleTimeInterval = false;
if( priv->ieee80211->state == IEEE80211_LINKED &&
priv->ieee80211->bfsync_enable &&
(priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_CDD_FSYNC))
{
// Count rate 54, MCS [7], [12, 13, 14, 15]
u32 rate_bitmap;
for(rate_index = 0; rate_index <= 27; rate_index++)
{
rate_bitmap = 1 << rate_index;
if(priv->ieee80211->fsync_rate_bitmap & rate_bitmap)
rate_count+= priv->stats.received_rate_histogram[1][rate_index];
}
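// The histogram counters are cumulative, so handle 32-bit wraparound
// when computing the per-interval diff.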
if(rate_count < priv->rate_record)
rate_count_diff = 0xffffffff - rate_count + priv->rate_record;
else
rate_count_diff = rate_count - priv->rate_record;
if(rate_count_diff < priv->rateCountDiffRecord)
{
u32 DiffNum = priv->rateCountDiffRecord - rate_count_diff;
// Continue count
if(DiffNum >= priv->ieee80211->fsync_seconddiff_ratethreshold)
priv->ContiuneDiffCount++;
else
priv->ContiuneDiffCount = 0;
// Continue count over
if(priv->ContiuneDiffCount >=2)
{
bSwitchFromCountDiff = true;
priv->ContiuneDiffCount = 0;
}
}
else
{
// Stop continue count
priv->ContiuneDiffCount = 0;
}
//If Count diff <= FsyncRateCountThreshold
if(rate_count_diff <= priv->ieee80211->fsync_firstdiff_ratethreshold)
{
bSwitchFromCountDiff = true;
priv->ContiuneDiffCount = 0;
}
priv->rate_record = rate_count;
priv->rateCountDiffRecord = rate_count_diff;
RT_TRACE(COMP_HALDM, "rateRecord %d rateCount %d, rateCountdiff %d bSwitchFsync %d\n", priv->rate_record, rate_count, rate_count_diff , priv->bswitch_fsync);
// If we never receive those MCS rates and RSSI > 30%, then toggle fsync.
if(priv->undecorated_smoothed_pwdb > priv->ieee80211->fsync_rssi_threshold && bSwitchFromCountDiff)
{
bDoubleTimeInterval = true;
priv->bswitch_fsync = !priv->bswitch_fsync;
if(priv->bswitch_fsync)
{
#ifdef RTL8190P
write_nic_byte(dev, 0xC36, 0x00);
#else
write_nic_byte(dev,0xC36, 0x1c);
#endif
write_nic_byte(dev, 0xC3e, 0x90);
}
else
{
#ifdef RTL8190P
write_nic_byte(dev, 0xC36, 0x40);
#else
write_nic_byte(dev, 0xC36, 0x5c);
#endif
write_nic_byte(dev, 0xC3e, 0x96);
}
}
else if(priv->undecorated_smoothed_pwdb <= priv->ieee80211->fsync_rssi_threshold)
{
if(priv->bswitch_fsync)
{
priv->bswitch_fsync = false;
#ifdef RTL8190P
write_nic_byte(dev, 0xC36, 0x40);
#else
write_nic_byte(dev, 0xC36, 0x5c);
#endif
write_nic_byte(dev, 0xC3e, 0x96);
}
}
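/*
 * Re-arm the software fsync timer: the normal interval, or
 * interval * fsync_multiple_timeinterval right after a switch.
 */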
if(bDoubleTimeInterval){
if(timer_pending(&priv->fsync_timer))
del_timer_sync(&priv->fsync_timer);
priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval*priv->ieee80211->fsync_multiple_timeinterval);
add_timer(&priv->fsync_timer);
}
else{
if(timer_pending(&priv->fsync_timer))
del_timer_sync(&priv->fsync_timer);
priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval);
add_timer(&priv->fsync_timer);
}
}
else
{
// Let the register return to its default value.
if(priv->bswitch_fsync)
{
priv->bswitch_fsync = false;
#ifdef RTL8190P
write_nic_byte(dev, 0xC36, 0x40);
#else
write_nic_byte(dev, 0xC36, 0x5c);
#endif
write_nic_byte(dev, 0xC3e, 0x96);
}
priv->ContiuneDiffCount = 0;
#ifdef RTL8190P
write_nic_dword(dev, rOFDM0_RxDetector2, 0x164052cd);
#else
write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
#endif
}
RT_TRACE(COMP_HALDM, "ContiuneDiffCount %d\n", priv->ContiuneDiffCount);
RT_TRACE(COMP_HALDM, "rateRecord %d rateCount %d, rateCountdiff %d bSwitchFsync %d\n", priv->rate_record, rate_count, rate_count_diff , priv->bswitch_fsync);
}
static void dm_StartHWFsync(struct net_device *dev)
{
RT_TRACE(COMP_HALDM, "%s\n", __FUNCTION__);
write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cf);
write_nic_byte(dev, 0xc3b, 0x41);
}
static void dm_EndSWFsync(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
RT_TRACE(COMP_HALDM, "%s\n", __FUNCTION__);
del_timer_sync(&(priv->fsync_timer));
// Let the register return to its default value.
if(priv->bswitch_fsync)
{
priv->bswitch_fsync = false;
#ifdef RTL8190P
write_nic_byte(dev, 0xC36, 0x40);
#else
write_nic_byte(dev, 0xC36, 0x5c);
#endif
write_nic_byte(dev, 0xC3e, 0x96);
}
priv->ContiuneDiffCount = 0;
#ifndef RTL8190P
write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
#endif
}
static void dm_StartSWFsync(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 rateIndex;
u32 rateBitmap;
RT_TRACE(COMP_HALDM,"%s\n", __FUNCTION__);
// Initialize the rate record to zero and start recording.
priv->rate_record = 0;
// Initialize the continue-diff count to zero and start recording.
priv->ContiuneDiffCount = 0;
priv->rateCountDiffRecord = 0;
priv->bswitch_fsync = false;
if(priv->ieee80211->mode == WIRELESS_MODE_N_24G)
{
priv->ieee80211->fsync_firstdiff_ratethreshold= 600;
priv->ieee80211->fsync_seconddiff_ratethreshold = 0xffff;
}
else
{
priv->ieee80211->fsync_firstdiff_ratethreshold= 200;
priv->ieee80211->fsync_seconddiff_ratethreshold = 200;
}
for(rateIndex = 0; rateIndex <= 27; rateIndex++)
{
rateBitmap = 1 << rateIndex;
if(priv->ieee80211->fsync_rate_bitmap & rateBitmap)
priv->rate_record += priv->stats.received_rate_histogram[1][rateIndex];
}
if(timer_pending(&priv->fsync_timer))
del_timer_sync(&priv->fsync_timer);
priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval);
add_timer(&priv->fsync_timer);
#ifndef RTL8190P
write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cd);
#endif
}
static void dm_EndHWFsync(struct net_device *dev)
{
RT_TRACE(COMP_HALDM,"%s\n", __FUNCTION__);
write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
write_nic_byte(dev, 0xc3b, 0x49);
}
void dm_check_fsync(struct net_device *dev)
{
#define RegC38_Default 0
#define RegC38_NonFsync_Other_AP 1
#define RegC38_Fsync_AP_BCM 2
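/*
 * reg_c38_State tracks which value register 0xc38 (rOFDM0_RxDetector3)
 * currently holds: the driver default, the non-fsync value used near
 * other APs at low RSSI, or the fsync value used with Broadcom APs.
 */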
struct r8192_priv *priv = ieee80211_priv(dev);
//u32 framesyncC34;
static u8 reg_c38_State=RegC38_Default;
static u32 reset_cnt=0;
RT_TRACE(COMP_HALDM, "RSSI %d TimeInterval %d MultipleTimeInterval %d\n", priv->ieee80211->fsync_rssi_threshold, priv->ieee80211->fsync_time_interval, priv->ieee80211->fsync_multiple_timeinterval);
RT_TRACE(COMP_HALDM, "RateBitmap 0x%x FirstDiffRateThreshold %d SecondDiffRateThreshold %d\n", priv->ieee80211->fsync_rate_bitmap, priv->ieee80211->fsync_firstdiff_ratethreshold, priv->ieee80211->fsync_seconddiff_ratethreshold);
if( priv->ieee80211->state == IEEE80211_LINKED &&
(priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_CDD_FSYNC))
{
if(priv->ieee80211->bfsync_enable == 0)
{
switch(priv->ieee80211->fsync_state)
{
case Default_Fsync:
dm_StartHWFsync(dev);
priv->ieee80211->fsync_state = HW_Fsync;
break;
case SW_Fsync:
dm_EndSWFsync(dev);
dm_StartHWFsync(dev);
priv->ieee80211->fsync_state = HW_Fsync;
break;
case HW_Fsync:
default:
break;
}
}
else
{
switch(priv->ieee80211->fsync_state)
{
case Default_Fsync:
dm_StartSWFsync(dev);
priv->ieee80211->fsync_state = SW_Fsync;
break;
case HW_Fsync:
dm_EndHWFsync(dev);
dm_StartSWFsync(dev);
priv->ieee80211->fsync_state = SW_Fsync;
break;
case SW_Fsync:
default:
break;
}
}
if(priv->framesyncMonitor)
{
if(reg_c38_State != RegC38_Fsync_AP_BCM)
{ // For Broadcom APs we write a different default value
#ifdef RTL8190P
write_nic_byte(dev, rOFDM0_RxDetector3, 0x15);
#else
write_nic_byte(dev, rOFDM0_RxDetector3, 0x95);
#endif
reg_c38_State = RegC38_Fsync_AP_BCM;
}
}
}
else
{
switch(priv->ieee80211->fsync_state)
{
case HW_Fsync:
dm_EndHWFsync(dev);
priv->ieee80211->fsync_state = Default_Fsync;
break;
case SW_Fsync:
dm_EndSWFsync(dev);
priv->ieee80211->fsync_state = Default_Fsync;
break;
case Default_Fsync:
default:
break;
}
if(priv->framesyncMonitor)
{
if(priv->ieee80211->state == IEEE80211_LINKED)
{
if(priv->undecorated_smoothed_pwdb <= RegC38_TH)
{
if(reg_c38_State != RegC38_NonFsync_Other_AP)
{
#ifdef RTL8190P
write_nic_byte(dev, rOFDM0_RxDetector3, 0x10);
#else
write_nic_byte(dev, rOFDM0_RxDetector3, 0x90);
#endif
reg_c38_State = RegC38_NonFsync_Other_AP;
}
}
else if(priv->undecorated_smoothed_pwdb >= (RegC38_TH+5))
{
if(reg_c38_State)
{
write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync);
reg_c38_State = RegC38_Default;
//DbgPrint("Fsync is idle, rssi>=40, write 0xc38 = 0x%x \n", pHalData->framesync);
}
}
}
else
{
if(reg_c38_State)
{
write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync);
reg_c38_State = RegC38_Default;
//DbgPrint("Fsync is idle, not connected, write 0xc38 = 0x%x \n", pHalData->framesync);
}
}
}
}
if(priv->framesyncMonitor)
{
if(priv->reset_count != reset_cnt)
{ // After a silent reset, reg_c38_State is returned to its default value
write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync);
reg_c38_State = RegC38_Default;
reset_cnt = priv->reset_count;
//DbgPrint("reg_c38_State = 0 for silent reset. \n");
}
}
else
{
if(reg_c38_State)
{
write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync);
reg_c38_State = RegC38_Default;
//DbgPrint("framesync no monitor, write 0xc38 = 0x%x \n", pHalData->framesync);
}
}
}
/*-----------------------------------------------------------------------------
* Function: dm_shadow_init()
*
* Overview: Store all NIC MAC/BB register content.
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 05/29/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
extern void dm_shadow_init(struct net_device *dev)
{
u8 page;
u16 offset;
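/* Shadow MAC/BB register pages 0-4, 8-10 and 12-14; pages 5-7 and 11
 * are skipped. */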
for (page = 0; page < 5; page++)
for (offset = 0; offset < 256; offset++)
{
dm_shadow[page][offset] = read_nic_byte(dev, offset+page*256);
//DbgPrint("P-%d/O-%02x=%02x\r\n", page, offset, DM_Shadow[page][offset]);
}
for (page = 8; page < 11; page++)
for (offset = 0; offset < 256; offset++)
dm_shadow[page][offset] = read_nic_byte(dev, offset+page*256);
for (page = 12; page < 15; page++)
for (offset = 0; offset < 256; offset++)
dm_shadow[page][offset] = read_nic_byte(dev, offset+page*256);
} /* dm_shadow_init */
/*---------------------------Define function prototype------------------------*/
/*-----------------------------------------------------------------------------
* Function: DM_DynamicTxPower()
*
* Overview: Detect signal strength to control the TX registers
* (TX power control for near/far range).
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 03/06/2008 Jacken Create Version 0.
*
*---------------------------------------------------------------------------*/
static void dm_init_dynamic_txpower(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
// Initialize TX power control for near/far range; added by amy 2008/05/15, ported from Windows code.
priv->ieee80211->bdynamic_txpower_enable = true; // Enable TX power control by default
priv->bLastDTPFlag_High = false;
priv->bLastDTPFlag_Low = false;
priv->bDynamicTxHighPower = false;
priv->bDynamicTxLowPower = false;
}
static void dm_dynamic_txpower(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
unsigned int txhipower_threshhold=0;
unsigned int txlowpower_threshold=0;
if(priv->ieee80211->bdynamic_txpower_enable != true)
{
priv->bDynamicTxHighPower = false;
priv->bDynamicTxLowPower = false;
return;
}
//printk("priv->ieee80211->current_network.unknown_cap_exist is %d ,priv->ieee80211->current_network.broadcom_cap_exist is %d\n",priv->ieee80211->current_network.unknown_cap_exist,priv->ieee80211->current_network.broadcom_cap_exist);
if((priv->ieee80211->current_network.atheros_cap_exist ) && (priv->ieee80211->mode == IEEE_G)){
txhipower_threshhold = TX_POWER_ATHEROAP_THRESH_HIGH;
txlowpower_threshold = TX_POWER_ATHEROAP_THRESH_LOW;
}
else
{
txhipower_threshhold = TX_POWER_NEAR_FIELD_THRESH_HIGH;
txlowpower_threshold = TX_POWER_NEAR_FIELD_THRESH_LOW;
}
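// Atheros APs in 11g mode get dedicated thresholds; everything else
// uses the generic near-field thresholds.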
// printk("=======>%s(): txhipower_threshhold is %d,txlowpower_threshold is %d\n",__FUNCTION__,txhipower_threshhold,txlowpower_threshold);
RT_TRACE(COMP_TXAGC,"priv->undecorated_smoothed_pwdb = %ld \n" , priv->undecorated_smoothed_pwdb);
if(priv->ieee80211->state == IEEE80211_LINKED)
{
if(priv->undecorated_smoothed_pwdb >= txhipower_threshhold)
{
priv->bDynamicTxHighPower = true;
priv->bDynamicTxLowPower = false;
}
else
{
// high power state check
if(priv->undecorated_smoothed_pwdb < txlowpower_threshold && priv->bDynamicTxHighPower == true)
{
priv->bDynamicTxHighPower = false;
}
// low power state check
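// The 35/40 split gives hysteresis for the low-power state so it does
// not toggle on small RSSI fluctuations.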
if(priv->undecorated_smoothed_pwdb < 35)
{
priv->bDynamicTxLowPower = true;
}
else if(priv->undecorated_smoothed_pwdb >= 40)
{
priv->bDynamicTxLowPower = false;
}
}
}
else
{
//pHalData->bTXPowerCtrlforNearFarRange = !pHalData->bTXPowerCtrlforNearFarRange;
priv->bDynamicTxHighPower = false;
priv->bDynamicTxLowPower = false;
}
if( (priv->bDynamicTxHighPower != priv->bLastDTPFlag_High ) ||
(priv->bDynamicTxLowPower != priv->bLastDTPFlag_Low ) )
{
RT_TRACE(COMP_TXAGC,"SetTxPowerLevel8190() channel = %d \n" , priv->ieee80211->current_network.channel);
#if defined(RTL8190P) || defined(RTL8192E)
SetTxPowerLevel8190(Adapter,pHalData->CurrentChannel);
#endif
#ifdef RTL8192U
rtl8192_phy_setTxPower(dev,priv->ieee80211->current_network.channel);
//pHalData->bStartTxCtrlByTPCNFR = FALSE; //Clear the flag of Set TX Power from Sitesurvey
#endif
}
priv->bLastDTPFlag_High = priv->bDynamicTxHighPower;
priv->bLastDTPFlag_Low = priv->bDynamicTxLowPower;
} /* dm_dynamic_txpower */
// added by vivi, for reading tx rate and retry count
static void dm_check_txrateandretrycount(struct net_device * dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_device* ieee = priv->ieee80211;
//for 11n tx rate
// priv->stats.CurrentShowTxate = read_nic_byte(dev, Current_Tx_Rate_Reg);
ieee->softmac_stats.CurrentShowTxate = read_nic_byte(dev, Current_Tx_Rate_Reg);
//printk("=============>tx_rate_reg:%x\n", ieee->softmac_stats.CurrentShowTxate);
//for initial tx rate
// priv->stats.last_packet_rate = read_nic_byte(dev, Initial_Tx_Rate_Reg);
ieee->softmac_stats.last_packet_rate = read_nic_byte(dev ,Initial_Tx_Rate_Reg);
//for tx retry count
// priv->stats.txretrycount = read_nic_dword(dev, Tx_Retry_Count_Reg);
ieee->softmac_stats.txretrycount = read_nic_dword(dev, Tx_Retry_Count_Reg);
}
static void dm_send_rssi_tofw(struct net_device *dev)
{
DCMD_TXCMD_T tx_cmd;
struct r8192_priv *priv = ieee80211_priv(dev);
// When running Chariot tests we should stop sending the TX command,
// because the 92E always silently resets when we send one. We use register
// 0x1e0 (byte) to notify the driver instead, so the command path below is
// left unreachable on purpose.
write_nic_byte(dev, DRIVER_RSSI, (u8)priv->undecorated_smoothed_pwdb);
return;
tx_cmd.Op = TXCMD_SET_RX_RSSI;
tx_cmd.Length = 4;
tx_cmd.Value = priv->undecorated_smoothed_pwdb;
cmpk_message_handle_tx(dev, (u8*)&tx_cmd,
DESC_PACKET_TYPE_INIT, sizeof(DCMD_TXCMD_T));
}
/*---------------------------Define function prototype------------------------*/
| gpl-2.0 |
Andiry/linux-test | sound/soc/omap/omap-twl4030.c | 134 | 10688 | /*
* omap-twl4030.c -- SoC audio for TI SoC based boards with twl4030 codec
*
* Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
* All rights reserved.
*
* Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
*
* This driver replaces the following machine drivers:
* omap3beagle (Author: Steve Sakoman <steve@sakoman.com>)
* omap3evm (Author: Anuj Aggarwal <anuj.aggarwal@ti.com>)
* overo (Author: Steve Sakoman <steve@sakoman.com>)
* igep0020 (Author: Enric Balletbo i Serra <eballetbo@iseebcn.com>)
* zoom2 (Author: Misael Lopez Cruz <misael.lopez@ti.com>)
* sdp3430 (Author: Misael Lopez Cruz <misael.lopez@ti.com>)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#include <linux/platform_device.h>
#include <linux/platform_data/omap-twl4030.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/jack.h>
#include "omap-mcbsp.h"
struct omap_twl4030 {
int jack_detect; /* board can detect jack events */
struct snd_soc_jack hs_jack;
};
static int omap_twl4030_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
unsigned int fmt;
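/*
 * Derive the DAI format from the channel count: stereo streams use I2S,
 * four-channel streams use DSP_A TDM with an inverted bit clock; the
 * codec is clock/frame master (CBM_CFM) in both cases.
 */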
switch (params_channels(params)) {
case 2: /* Stereo I2S mode */
fmt = SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM;
break;
case 4: /* Four channel TDM mode */
fmt = SND_SOC_DAIFMT_DSP_A |
SND_SOC_DAIFMT_IB_NF |
SND_SOC_DAIFMT_CBM_CFM;
break;
default:
return -EINVAL;
}
return snd_soc_runtime_set_dai_fmt(rtd, fmt);
}
static struct snd_soc_ops omap_twl4030_ops = {
.hw_params = omap_twl4030_hw_params,
};
static const struct snd_soc_dapm_widget dapm_widgets[] = {
SND_SOC_DAPM_SPK("Earpiece Spk", NULL),
SND_SOC_DAPM_SPK("Handsfree Spk", NULL),
SND_SOC_DAPM_HP("Headset Stereophone", NULL),
SND_SOC_DAPM_SPK("Ext Spk", NULL),
SND_SOC_DAPM_SPK("Carkit Spk", NULL),
SND_SOC_DAPM_MIC("Main Mic", NULL),
SND_SOC_DAPM_MIC("Sub Mic", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_MIC("Carkit Mic", NULL),
SND_SOC_DAPM_MIC("Digital0 Mic", NULL),
SND_SOC_DAPM_MIC("Digital1 Mic", NULL),
SND_SOC_DAPM_LINE("Line In", NULL),
};
static const struct snd_soc_dapm_route audio_map[] = {
/* Headset Stereophone: HSOL, HSOR */
{"Headset Stereophone", NULL, "HSOL"},
{"Headset Stereophone", NULL, "HSOR"},
/* External Speakers: HFL, HFR */
{"Handsfree Spk", NULL, "HFL"},
{"Handsfree Spk", NULL, "HFR"},
/* External Speakers: PredrivL, PredrivR */
{"Ext Spk", NULL, "PREDRIVEL"},
{"Ext Spk", NULL, "PREDRIVER"},
/* Carkit speakers: CARKITL, CARKITR */
{"Carkit Spk", NULL, "CARKITL"},
{"Carkit Spk", NULL, "CARKITR"},
/* Earpiece */
{"Earpiece Spk", NULL, "EARPIECE"},
/* External Mics: MAINMIC, SUBMIC with bias */
{"MAINMIC", NULL, "Main Mic"},
{"Main Mic", NULL, "Mic Bias 1"},
{"SUBMIC", NULL, "Sub Mic"},
{"Sub Mic", NULL, "Mic Bias 2"},
/* Headset Mic: HSMIC with bias */
{"HSMIC", NULL, "Headset Mic"},
{"Headset Mic", NULL, "Headset Mic Bias"},
/* Digital Mics: DIGIMIC0, DIGIMIC1 with bias */
{"DIGIMIC0", NULL, "Digital0 Mic"},
{"Digital0 Mic", NULL, "Mic Bias 1"},
{"DIGIMIC1", NULL, "Digital1 Mic"},
{"Digital1 Mic", NULL, "Mic Bias 2"},
/* Carkit In: CARKITMIC */
{"CARKITMIC", NULL, "Carkit Mic"},
/* Aux In: AUXL, AUXR */
{"AUXL", NULL, "Line In"},
{"AUXR", NULL, "Line In"},
};
/* Headset jack detection DAPM pins */
static struct snd_soc_jack_pin hs_jack_pins[] = {
{
.pin = "Headset Mic",
.mask = SND_JACK_MICROPHONE,
},
{
.pin = "Headset Stereophone",
.mask = SND_JACK_HEADPHONE,
},
};
/* Headset jack detection gpios */
static struct snd_soc_jack_gpio hs_jack_gpios[] = {
{
.name = "hsdet-gpio",
.report = SND_JACK_HEADSET,
.debounce_time = 200,
},
};
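/* The .gpio field is filled in at runtime from priv->jack_detect in
 * omap_twl4030_init(). */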
static inline void twl4030_disconnect_pin(struct snd_soc_dapm_context *dapm,
int connected, char *pin)
{
if (!connected)
snd_soc_dapm_disable_pin(dapm, pin);
}
static int omap_twl4030_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_card *card = rtd->card;
struct snd_soc_dapm_context *dapm = &codec->dapm;
struct omap_tw4030_pdata *pdata = dev_get_platdata(card->dev);
struct omap_twl4030 *priv = snd_soc_card_get_drvdata(card);
int ret = 0;
/* Headset jack detection only if it is supported */
if (priv->jack_detect > 0) {
hs_jack_gpios[0].gpio = priv->jack_detect;
ret = snd_soc_jack_new(codec, "Headset Jack", SND_JACK_HEADSET,
&priv->hs_jack);
if (ret)
return ret;
ret = snd_soc_jack_add_pins(&priv->hs_jack,
ARRAY_SIZE(hs_jack_pins),
hs_jack_pins);
if (ret)
return ret;
ret = snd_soc_jack_add_gpios(&priv->hs_jack,
ARRAY_SIZE(hs_jack_gpios),
hs_jack_gpios);
if (ret)
return ret;
}
/*
* NULL pdata means we booted with DT. In this case the routing is
* provided and the card is fully routed, no need to mark pins.
*/
if (!pdata || !pdata->custom_routing)
return ret;
/* Disable not connected paths if not used */
twl4030_disconnect_pin(dapm, pdata->has_ear, "Earpiece Spk");
twl4030_disconnect_pin(dapm, pdata->has_hf, "Handsfree Spk");
twl4030_disconnect_pin(dapm, pdata->has_hs, "Headset Stereophone");
twl4030_disconnect_pin(dapm, pdata->has_predriv, "Ext Spk");
twl4030_disconnect_pin(dapm, pdata->has_carkit, "Carkit Spk");
twl4030_disconnect_pin(dapm, pdata->has_mainmic, "Main Mic");
twl4030_disconnect_pin(dapm, pdata->has_submic, "Sub Mic");
twl4030_disconnect_pin(dapm, pdata->has_hsmic, "Headset Mic");
twl4030_disconnect_pin(dapm, pdata->has_carkitmic, "Carkit Mic");
twl4030_disconnect_pin(dapm, pdata->has_digimic0, "Digital0 Mic");
twl4030_disconnect_pin(dapm, pdata->has_digimic1, "Digital1 Mic");
twl4030_disconnect_pin(dapm, pdata->has_linein, "Line In");
return ret;
}
static int omap_twl4030_card_remove(struct snd_soc_card *card)
{
struct omap_twl4030 *priv = snd_soc_card_get_drvdata(card);
if (priv->jack_detect > 0)
snd_soc_jack_free_gpios(&priv->hs_jack,
ARRAY_SIZE(hs_jack_gpios),
hs_jack_gpios);
return 0;
}
/* Digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link omap_twl4030_dai_links[] = {
{
.name = "TWL4030 HiFi",
.stream_name = "TWL4030 HiFi",
.cpu_dai_name = "omap-mcbsp.2",
.codec_dai_name = "twl4030-hifi",
.platform_name = "omap-mcbsp.2",
.codec_name = "twl4030-codec",
.init = omap_twl4030_init,
.ops = &omap_twl4030_ops,
},
{
.name = "TWL4030 Voice",
.stream_name = "TWL4030 Voice",
.cpu_dai_name = "omap-mcbsp.3",
.codec_dai_name = "twl4030-voice",
.platform_name = "omap-mcbsp.3",
.codec_name = "twl4030-codec",
.dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_NF |
SND_SOC_DAIFMT_CBM_CFM,
},
};
/* Audio machine driver */
static struct snd_soc_card omap_twl4030_card = {
.owner = THIS_MODULE,
.remove = omap_twl4030_card_remove,
.dai_link = omap_twl4030_dai_links,
.num_links = ARRAY_SIZE(omap_twl4030_dai_links),
.dapm_widgets = dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(dapm_widgets),
.dapm_routes = audio_map,
.num_dapm_routes = ARRAY_SIZE(audio_map),
};
static int omap_twl4030_probe(struct platform_device *pdev)
{
struct omap_tw4030_pdata *pdata = dev_get_platdata(&pdev->dev);
struct device_node *node = pdev->dev.of_node;
struct snd_soc_card *card = &omap_twl4030_card;
struct omap_twl4030 *priv;
int ret = 0;
card->dev = &pdev->dev;
priv = devm_kzalloc(&pdev->dev, sizeof(struct omap_twl4030), GFP_KERNEL);
if (priv == NULL)
return -ENOMEM;
if (node) {
struct device_node *dai_node;
struct property *prop;
if (snd_soc_of_parse_card_name(card, "ti,model")) {
dev_err(&pdev->dev, "Card name is not provided\n");
return -ENODEV;
}
dai_node = of_parse_phandle(node, "ti,mcbsp", 0);
if (!dai_node) {
dev_err(&pdev->dev, "McBSP node is not provided\n");
return -EINVAL;
}
omap_twl4030_dai_links[0].cpu_dai_name = NULL;
omap_twl4030_dai_links[0].cpu_of_node = dai_node;
omap_twl4030_dai_links[0].platform_name = NULL;
omap_twl4030_dai_links[0].platform_of_node = dai_node;
dai_node = of_parse_phandle(node, "ti,mcbsp-voice", 0);
if (!dai_node) {
card->num_links = 1;
} else {
omap_twl4030_dai_links[1].cpu_dai_name = NULL;
omap_twl4030_dai_links[1].cpu_of_node = dai_node;
omap_twl4030_dai_links[1].platform_name = NULL;
omap_twl4030_dai_links[1].platform_of_node = dai_node;
}
priv->jack_detect = of_get_named_gpio(node,
"ti,jack-det-gpio", 0);
/* Optional: audio routing can be provided */
prop = of_find_property(node, "ti,audio-routing", NULL);
if (prop) {
ret = snd_soc_of_parse_audio_routing(card,
"ti,audio-routing");
if (ret)
return ret;
card->fully_routed = 1;
}
} else if (pdata) {
if (pdata->card_name) {
card->name = pdata->card_name;
} else {
dev_err(&pdev->dev, "Card name is not provided\n");
return -ENODEV;
}
if (!pdata->voice_connected)
card->num_links = 1;
priv->jack_detect = pdata->jack_detect;
} else {
dev_err(&pdev->dev, "Missing pdata\n");
return -ENODEV;
}
snd_soc_card_set_drvdata(card, priv);
ret = devm_snd_soc_register_card(&pdev->dev, card);
if (ret) {
dev_err(&pdev->dev, "devm_snd_soc_register_card() failed: %d\n",
ret);
return ret;
}
return 0;
}
static const struct of_device_id omap_twl4030_of_match[] = {
{.compatible = "ti,omap-twl4030", },
{ },
};
MODULE_DEVICE_TABLE(of, omap_twl4030_of_match);
static struct platform_driver omap_twl4030_driver = {
.driver = {
.name = "omap-twl4030",
.pm = &snd_soc_pm_ops,
.of_match_table = omap_twl4030_of_match,
},
.probe = omap_twl4030_probe,
};
module_platform_driver(omap_twl4030_driver);
MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
MODULE_DESCRIPTION("ALSA SoC for TI SoC based boards with twl4030 codec");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:omap-twl4030");
| gpl-2.0 |
TeamOrion-Devices/kernel_asus_grouper | drivers/gpu/drm/nouveau/nv50_graph.c | 390 | 31116 | /*
* Copyright (C) 2007 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_grctx.h"
#include "nouveau_dma.h"
#include "nouveau_vm.h"
#include "nv50_evo.h"
struct nv50_graph_engine {
struct nouveau_exec_engine base;
u32 ctxprog[512];
u32 ctxprog_size;
u32 grctx_size;
};
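/* ctxprog holds the context-switch microcode uploaded to PGRAPH in
 * nv50_graph_init_ctxctl(); grctx_size is the size of each channel's
 * graphics context buffer. */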
static void
nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
{
const uint32_t mask = 0x00010001;
if (enabled)
nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
else
nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
}
static struct nouveau_channel *
nv50_graph_channel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t inst;
int i;
/* Be sure we're not in the middle of a context switch or bad things
* will happen, such as unloading the wrong pgraph context.
*/
if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
NV_ERROR(dev, "Ctxprog is still running\n");
inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
return NULL;
inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
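/* CTXCTL_CUR stores the channel's RAMIN instance address shifted right
 * by 12, so shift it back before comparing against vinst. */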
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
struct nouveau_channel *chan = dev_priv->channels.ptr[i];
if (chan && chan->ramin && chan->ramin->vinst == inst)
return chan;
}
return NULL;
}
static int
nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
{
uint32_t fifo = nv_rd32(dev, 0x400500);
nv_wr32(dev, 0x400500, fifo & ~1);
nv_wr32(dev, 0x400784, inst);
nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
nv_wr32(dev, 0x400040, 0xffffffff);
(void)nv_rd32(dev, 0x400040);
nv_wr32(dev, 0x400040, 0x00000000);
nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
if (nouveau_wait_for_idle(dev))
nv_wr32(dev, 0x40032c, inst | (1<<31));
nv_wr32(dev, 0x400500, fifo);
return 0;
}
static int
nv50_graph_unload_context(struct drm_device *dev)
{
uint32_t inst;
inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
return 0;
inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
nouveau_wait_for_idle(dev);
nv_wr32(dev, 0x400784, inst);
nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
nouveau_wait_for_idle(dev);
nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
return 0;
}
static void
nv50_graph_init_reset(struct drm_device *dev)
{
uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
NV_DEBUG(dev, "\n");
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
}
static void
nv50_graph_init_intr(struct drm_device *dev)
{
NV_DEBUG(dev, "\n");
nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
nv_wr32(dev, 0x400138, 0xffffffff);
nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
}
static void
nv50_graph_init_regs__nv(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t units = nv_rd32(dev, 0x1540);
int i;
NV_DEBUG(dev, "\n");
nv_wr32(dev, 0x400804, 0xc0000000);
nv_wr32(dev, 0x406800, 0xc0000000);
nv_wr32(dev, 0x400c04, 0xc0000000);
nv_wr32(dev, 0x401800, 0xc0000000);
nv_wr32(dev, 0x405018, 0xc0000000);
nv_wr32(dev, 0x402000, 0xc0000000);
for (i = 0; i < 16; i++) {
if (units & 1 << i) {
if (dev_priv->chipset < 0xa0) {
nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
} else {
nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
}
}
}
nv_wr32(dev, 0x400108, 0xffffffff);
nv_wr32(dev, 0x400824, 0x00004000);
nv_wr32(dev, 0x400500, 0x00010001);
}
static void
nv50_graph_init_zcull(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
NV_DEBUG(dev, "\n");
switch (dev_priv->chipset & 0xf0) {
case 0x50:
case 0x80:
case 0x90:
nv_wr32(dev, 0x402ca8, 0x00000800);
break;
case 0xa0:
default:
nv_wr32(dev, 0x402cc0, 0x00000000);
if (dev_priv->chipset == 0xa0 ||
dev_priv->chipset == 0xaa ||
dev_priv->chipset == 0xac) {
nv_wr32(dev, 0x402ca8, 0x00000802);
} else {
nv_wr32(dev, 0x402cc0, 0x00000000);
nv_wr32(dev, 0x402ca8, 0x00000002);
}
break;
}
/* zero out zcull regions */
for (i = 0; i < 8; i++) {
nv_wr32(dev, 0x402c20 + (i * 8), 0x00000000);
nv_wr32(dev, 0x402c24 + (i * 8), 0x00000000);
nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000);
nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
}
}
static int
nv50_graph_init_ctxctl(struct drm_device *dev)
{
struct nv50_graph_engine *pgraph = nv_engine(dev, NVOBJ_ENGINE_GR);
int i;
NV_DEBUG(dev, "\n");
nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
for (i = 0; i < pgraph->ctxprog_size; i++)
nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, pgraph->ctxprog[i]);
nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
nv_wr32(dev, 0x400320, 4);
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
return 0;
}
static int
nv50_graph_init(struct drm_device *dev, int engine)
{
int ret;
NV_DEBUG(dev, "\n");
nv50_graph_init_reset(dev);
nv50_graph_init_regs__nv(dev);
nv50_graph_init_zcull(dev);
ret = nv50_graph_init_ctxctl(dev);
if (ret)
return ret;
nv50_graph_init_intr(dev);
return 0;
}
static int
nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
nv_mask(dev, 0x400500, 0x00010001, 0x00000000);
if (!nv_wait(dev, 0x400700, ~0, 0) && suspend) {
nv_mask(dev, 0x400500, 0x00010001, 0x00010001);
return -EBUSY;
}
nv50_graph_unload_context(dev);
nv_wr32(dev, 0x40013c, 0x00000000);
return 0;
}
static int
nv50_graph_context_new(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramin = chan->ramin;
struct nouveau_gpuobj *grctx = NULL;
struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
struct nouveau_grctx ctx = {};
int hdr, ret;
NV_DEBUG(dev, "ch%d\n", chan->id);
ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 0,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &grctx);
if (ret)
return ret;
hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
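/* The context header sits at RAMIN offset 0x200 on the original nv50,
 * and at 0x20 on all later chipsets. */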
nv_wo32(ramin, hdr + 0x00, 0x00190002);
nv_wo32(ramin, hdr + 0x04, grctx->vinst + grctx->size - 1);
nv_wo32(ramin, hdr + 0x08, grctx->vinst);
nv_wo32(ramin, hdr + 0x0c, 0);
nv_wo32(ramin, hdr + 0x10, 0);
nv_wo32(ramin, hdr + 0x14, 0x00010000);
ctx.dev = chan->dev;
ctx.mode = NOUVEAU_GRCTX_VALS;
ctx.data = grctx;
nv50_grctx_init(&ctx);
nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);
dev_priv->engine.instmem.flush(dev);
atomic_inc(&chan->vm->engref[NVOBJ_ENGINE_GR]);
chan->engctx[NVOBJ_ENGINE_GR] = grctx;
return 0;
}
static void
nv50_graph_context_del(struct nouveau_channel *chan, int engine)
{
struct nouveau_gpuobj *grctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
unsigned long flags;
NV_DEBUG(dev, "ch%d\n", chan->id);
if (!chan->ramin)
return;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
pfifo->reassign(dev, false);
nv50_graph_fifo_access(dev, false);
if (nv50_graph_channel(dev) == chan)
nv50_graph_unload_context(dev);
for (i = hdr; i < hdr + 24; i += 4)
nv_wo32(chan->ramin, i, 0);
dev_priv->engine.instmem.flush(dev);
nv50_graph_fifo_access(dev, true);
pfifo->reassign(dev, true);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
nouveau_gpuobj_ref(NULL, &grctx);
atomic_dec(&chan->vm->engref[engine]);
chan->engctx[engine] = NULL;
}
static int
nv50_graph_object_new(struct nouveau_channel *chan, int engine,
u32 handle, u16 class)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *obj = NULL;
int ret;
ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
if (ret)
return ret;
obj->engine = 1;
obj->class = class;
nv_wo32(obj, 0x00, class);
nv_wo32(obj, 0x04, 0x00000000);
nv_wo32(obj, 0x08, 0x00000000);
nv_wo32(obj, 0x0c, 0x00000000);
dev_priv->engine.instmem.flush(dev);
ret = nouveau_ramht_insert(chan, handle, obj);
nouveau_gpuobj_ref(NULL, &obj);
return ret;
}
static void
nv50_graph_context_switch(struct drm_device *dev)
{
uint32_t inst;
nv50_graph_unload_context(dev);
inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
nv50_graph_do_load_context(dev, inst);
nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
}
static int
nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan,
u32 class, u32 mthd, u32 data)
{
struct nouveau_gpuobj *gpuobj;
gpuobj = nouveau_ramht_find(chan, data);
if (!gpuobj)
return -ENOENT;
if (nouveau_notifier_offset(gpuobj, NULL))
return -EINVAL;
chan->nvsw.vblsem = gpuobj;
chan->nvsw.vblsem_offset = ~0;
return 0;
}
static int
nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan,
u32 class, u32 mthd, u32 data)
{
if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
return -ERANGE;
chan->nvsw.vblsem_offset = data >> 2;
return 0;
}
static int
nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan,
u32 class, u32 mthd, u32 data)
{
chan->nvsw.vblsem_rval = data;
return 0;
}
static int
nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan,
u32 class, u32 mthd, u32 data)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
return -EINVAL;
drm_vblank_get(dev, data);
chan->nvsw.vblsem_head = data;
list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
return 0;
}
static int
nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
u32 class, u32 mthd, u32 data)
{
nouveau_finish_page_flip(chan, NULL);
return 0;
}
static void
nv50_graph_tlb_flush(struct drm_device *dev, int engine)
{
nv50_vm_flush_engine(dev, 0);
}
static void
nv84_graph_tlb_flush(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
bool idle, timeout = false;
unsigned long flags;
u64 start;
u32 tmp;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_mask(dev, 0x400500, 0x00000001, 0x00000000);
start = ptimer->read(dev);
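/* Poll the three PGRAPH status words; any unit whose 3-bit field reads 1
 * is treated as still busy. Give up after two seconds. */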
do {
idle = true;
for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) {
if ((tmp & 7) == 1)
idle = false;
}
for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) {
if ((tmp & 7) == 1)
idle = false;
}
for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) {
if ((tmp & 7) == 1)
idle = false;
}
} while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000));
if (timeout) {
NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: "
"0x%08x 0x%08x 0x%08x 0x%08x\n",
nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380),
nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
}
nv50_vm_flush_engine(dev, 0);
nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
static struct nouveau_enum nv50_mp_exec_error_names[] = {
{ 3, "STACK_UNDERFLOW", NULL },
{ 4, "QUADON_ACTIVE", NULL },
{ 8, "TIMEOUT", NULL },
{ 0x10, "INVALID_OPCODE", NULL },
{ 0x40, "BREAKPOINT", NULL },
{}
};
static struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
{ 0x00000001, "NOTIFY" },
{ 0x00000002, "IN" },
{ 0x00000004, "OUT" },
{}
};
static struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
{ 0x00000001, "FAULT" },
{}
};
static struct nouveau_bitfield nv50_graph_trap_strmout[] = {
{ 0x00000001, "FAULT" },
{}
};
static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
{ 0x00000001, "FAULT" },
{}
};
/* There must be a *lot* of these. Will take some time to gather them up. */
struct nouveau_enum nv50_data_error_names[] = {
{ 0x00000003, "INVALID_QUERY_OR_TEXTURE", NULL },
{ 0x00000004, "INVALID_VALUE", NULL },
{ 0x00000005, "INVALID_ENUM", NULL },
{ 0x00000008, "INVALID_OBJECT", NULL },
{ 0x00000009, "READ_ONLY_OBJECT", NULL },
{ 0x0000000a, "SUPERVISOR_OBJECT", NULL },
{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
{ 0x0000000c, "INVALID_BITFIELD", NULL },
{ 0x0000000d, "BEGIN_END_ACTIVE", NULL },
{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
{ 0x00000010, "RT_DOUBLE_BIND", NULL },
{ 0x00000011, "RT_TYPES_MISMATCH", NULL },
{ 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
{ 0x00000015, "FP_TOO_FEW_REGS", NULL },
{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
{ 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
{ 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
{ 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
{ 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
{ 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
{ 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
{ 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
{ 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
{}
};
static struct nouveau_bitfield nv50_graph_intr[] = {
{ 0x00000001, "NOTIFY" },
{ 0x00000002, "COMPUTE_QUERY" },
{ 0x00000010, "ILLEGAL_MTHD" },
{ 0x00000020, "ILLEGAL_CLASS" },
{ 0x00000040, "DOUBLE_NOTIFY" },
{ 0x00001000, "CONTEXT_SWITCH" },
{ 0x00010000, "BUFFER_NOTIFY" },
{ 0x00100000, "DATA_ERROR" },
{ 0x00200000, "TRAP" },
{ 0x01000000, "SINGLE_STEP" },
{}
};
static void
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t units = nv_rd32(dev, 0x1540);
uint32_t addr, mp10, status, pc, oplow, ophigh;
int i;
int mps = 0;
for (i = 0; i < 4; i++) {
if (!(units & 1 << (i+24)))
continue;
if (dev_priv->chipset < 0xa0)
addr = 0x408200 + (tpid << 12) + (i << 7);
else
addr = 0x408100 + (tpid << 11) + (i << 7);
mp10 = nv_rd32(dev, addr + 0x10);
status = nv_rd32(dev, addr + 0x14);
if (!status)
continue;
if (display) {
nv_rd32(dev, addr + 0x20);
pc = nv_rd32(dev, addr + 0x24);
oplow = nv_rd32(dev, addr + 0x70);
ophigh = nv_rd32(dev, addr + 0x74);
NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
"TP %d MP %d: ", tpid, i);
nouveau_enum_print(nv50_mp_exec_error_names, status);
printk(" at %06x warp %d, opcode %08x %08x\n",
pc&0xffffff, pc >> 24,
oplow, ophigh);
}
nv_wr32(dev, addr + 0x10, mp10);
nv_wr32(dev, addr + 0x14, 0);
mps++;
}
if (!mps && display)
NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
"No MPs claiming errors?\n", tpid);
}
static void
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
uint32_t ustatus_new, int display, const char *name)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
int tps = 0;
uint32_t units = nv_rd32(dev, 0x1540);
int i, r;
uint32_t ustatus_addr, ustatus;
for (i = 0; i < 16; i++) {
if (!(units & (1 << i)))
continue;
if (dev_priv->chipset < 0xa0)
ustatus_addr = ustatus_old + (i << 12);
else
ustatus_addr = ustatus_new + (i << 11);
ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
if (!ustatus)
continue;
tps++;
switch (type) {
case 6: /* texture error... unknown for now */
if (display) {
NV_ERROR(dev, "magic set %d:\n", i);
for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
nv_rd32(dev, r));
}
break;
case 7: /* MP error */
if (ustatus & 0x00010000) {
nv50_pgraph_mp_trap(dev, i, display);
ustatus &= ~0x00010000;
}
break;
case 8: /* TPDMA error */
{
uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
/* 2d engine destination */
if (ustatus & 0x00000010) {
if (display) {
NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
i, e14, e10);
NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
i, e0c, e18, e1c, e20, e24);
}
ustatus &= ~0x00000010;
}
/* Render target */
if (ustatus & 0x00000040) {
if (display) {
NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
i, e14, e10);
NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
i, e0c, e18, e1c, e20, e24);
}
ustatus &= ~0x00000040;
}
/* CUDA memory: l[], g[] or stack. */
if (ustatus & 0x00000080) {
if (display) {
if (e18 & 0x80000000) {
/* g[] read fault? */
NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
i, e14, e10 | ((e18 >> 24) & 0x1f));
e18 &= ~0x1f000000;
} else if (e18 & 0xc) {
/* g[] write fault? */
NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
i, e14, e10 | ((e18 >> 7) & 0x1f));
e18 &= ~0x00000f80;
} else {
NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
i, e14, e10);
}
NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
i, e0c, e18, e1c, e20, e24);
}
ustatus &= ~0x00000080;
}
}
break;
}
if (ustatus) {
if (display)
NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
}
nv_wr32(dev, ustatus_addr, 0xc0000000);
}
if (!tps && display)
NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
}
static int
nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid)
{
u32 status = nv_rd32(dev, 0x400108);
u32 ustatus;
if (!status && display) {
NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n");
return 1;
}
/* DISPATCH: Relays commands to other units and handles NOTIFY,
* COND, QUERY. If you get a trap from it, the command is still stuck
* in DISPATCH and you need to do something about it. */
if (status & 0x001) {
ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
if (!ustatus && display) {
NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
}
nv_wr32(dev, 0x400500, 0x00000000);
/* Known to be triggered by screwed up NOTIFY and COND... */
if (ustatus & 0x00000001) {
u32 addr = nv_rd32(dev, 0x400808);
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00001ffc);
u32 datal = nv_rd32(dev, 0x40080c);
u32 datah = nv_rd32(dev, 0x400810);
u32 class = nv_rd32(dev, 0x400814);
u32 r848 = nv_rd32(dev, 0x400848);
NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n");
if (display && (addr & 0x80000000)) {
NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
"subc %d class 0x%04x mthd 0x%04x "
"data 0x%08x%08x "
"400808 0x%08x 400848 0x%08x\n",
chid, inst, subc, class, mthd, datah,
datal, addr, r848);
} else
if (display) {
NV_INFO(dev, "PGRAPH - no stuck command?\n");
}
nv_wr32(dev, 0x400808, 0);
nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
nv_wr32(dev, 0x400848, 0);
ustatus &= ~0x00000001;
}
if (ustatus & 0x00000002) {
u32 addr = nv_rd32(dev, 0x40084c);
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00001ffc);
u32 data = nv_rd32(dev, 0x40085c);
u32 class = nv_rd32(dev, 0x400814);
NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n");
if (display && (addr & 0x80000000)) {
NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
"subc %d class 0x%04x mthd 0x%04x "
"data 0x%08x 40084c 0x%08x\n",
chid, inst, subc, class, mthd,
data, addr);
} else
if (display) {
NV_INFO(dev, "PGRAPH - no stuck command?\n");
}
nv_wr32(dev, 0x40084c, 0);
ustatus &= ~0x00000002;
}
if (ustatus && display) {
NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown "
"0x%08x)\n", ustatus);
}
nv_wr32(dev, 0x400804, 0xc0000000);
nv_wr32(dev, 0x400108, 0x001);
status &= ~0x001;
if (!status)
return 0;
}
/* M2MF: Memory to memory copy engine. */
if (status & 0x002) {
u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
if (display) {
NV_INFO(dev, "PGRAPH - TRAP_M2MF");
nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
printk("\n");
NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n",
nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808),
nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810));
}
/* No sane way found yet -- just reset the bugger. */
nv_wr32(dev, 0x400040, 2);
nv_wr32(dev, 0x400040, 0);
nv_wr32(dev, 0x406800, 0xc0000000);
nv_wr32(dev, 0x400108, 0x002);
status &= ~0x002;
}
/* VFETCH: Fetches data from vertex buffers. */
if (status & 0x004) {
u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
if (display) {
NV_INFO(dev, "PGRAPH - TRAP_VFETCH");
nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
printk("\n");
NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n",
nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08),
nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10));
}
nv_wr32(dev, 0x400c04, 0xc0000000);
nv_wr32(dev, 0x400108, 0x004);
status &= ~0x004;
}
/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
if (status & 0x008) {
ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
if (display) {
NV_INFO(dev, "PGRAPH - TRAP_STRMOUT");
nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
printk("\n");
NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n",
nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808),
nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810));
}
/* No sane way found yet -- just reset the bugger. */
nv_wr32(dev, 0x400040, 0x80);
nv_wr32(dev, 0x400040, 0);
nv_wr32(dev, 0x401800, 0xc0000000);
nv_wr32(dev, 0x400108, 0x008);
status &= ~0x008;
}
/* CCACHE: Handles code and c[] caches and fills them. */
if (status & 0x010) {
ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
if (display) {
NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
printk("\n");
NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
" %08x %08x %08x\n",
nv_rd32(dev, 0x405000), nv_rd32(dev, 0x405004),
nv_rd32(dev, 0x405008), nv_rd32(dev, 0x40500c),
nv_rd32(dev, 0x405010), nv_rd32(dev, 0x405014),
nv_rd32(dev, 0x40501c));
}
nv_wr32(dev, 0x405018, 0xc0000000);
nv_wr32(dev, 0x400108, 0x010);
status &= ~0x010;
}
/* Unknown, not seen yet... 0x402000 is the only trap status reg
* remaining, so try to handle it anyway. Perhaps related to that
* unknown DMA slot on tesla? */
if (status & 0x20) {
ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
if (display)
NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
nv_wr32(dev, 0x402000, 0xc0000000);
/* no status modification on purpose */
}
/* TEXTURE: CUDA texturing units */
if (status & 0x040) {
nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
"PGRAPH - TRAP_TEXTURE");
nv_wr32(dev, 0x400108, 0x040);
status &= ~0x040;
}
/* MP: CUDA execution engines. */
if (status & 0x080) {
nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
"PGRAPH - TRAP_MP");
nv_wr32(dev, 0x400108, 0x080);
status &= ~0x080;
}
/* TPDMA: Handles TP-initiated uncached memory accesses:
* l[], g[], stack, 2d surfaces, render targets. */
if (status & 0x100) {
nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
"PGRAPH - TRAP_TPDMA");
nv_wr32(dev, 0x400108, 0x100);
status &= ~0x100;
}
if (status) {
if (display)
NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
nv_wr32(dev, 0x400108, status);
}
return 1;
}
int
nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan;
unsigned long flags;
int i;
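/* Walk the channel table and match the context instance against each
* channel's ramin base. If nothing matches, the loop falls through
* and i ends up equal to engine.fifo.channels, which the caller just
* reports as the channel id. */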
spin_lock_irqsave(&dev_priv->channels.lock, flags);
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
chan = dev_priv->channels.ptr[i];
if (!chan || !chan->ramin)
continue;
if (inst == chan->ramin->vinst)
break;
}
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
return i;
}
static void
nv50_graph_isr(struct drm_device *dev)
{
u32 stat;
while ((stat = nv_rd32(dev, 0x400100))) {
u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
u32 chid = nv50_graph_isr_chid(dev, inst);
u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00001ffc);
u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
u32 class = nv_rd32(dev, 0x400814);
u32 show = stat;
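/* Handle what we can inline: 0x00000010 dispatches a registered
* software method, 0x00001000 requests a context switch, 0x00100000
* reports a DATA_ERROR and 0x00200000 a TRAP. Whatever is still set
* in 'show' afterwards only gets logged. */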
if (stat & 0x00000010) {
if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
mthd, data))
show &= ~0x00000010;
}
if (stat & 0x00001000) {
nv_wr32(dev, 0x400500, 0x00000000);
nv_wr32(dev, 0x400100, 0x00001000);
nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
nv50_graph_context_switch(dev);
stat &= ~0x00001000;
show &= ~0x00001000;
}
show = (show && nouveau_ratelimit()) ? show : 0;
if (show & 0x00100000) {
u32 ecode = nv_rd32(dev, 0x400110);
NV_INFO(dev, "PGRAPH - DATA_ERROR ");
nouveau_enum_print(nv50_data_error_names, ecode);
printk("\n");
}
if (stat & 0x00200000) {
if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
show &= ~0x00200000;
}
nv_wr32(dev, 0x400100, stat);
nv_wr32(dev, 0x400500, 0x00010001);
if (show) {
NV_INFO(dev, "PGRAPH -");
nouveau_bitfield_print(nv50_graph_intr, show);
printk("\n");
NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
"class 0x%04x mthd 0x%04x data 0x%08x\n",
chid, inst, subc, class, mthd, data);
nv50_fb_vm_trap(dev, 1);
}
}
if (nv_rd32(dev, 0x400824) & (1 << 31))
nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}
static void
nv50_graph_destroy(struct drm_device *dev, int engine)
{
struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
NVOBJ_ENGINE_DEL(dev, GR);
nouveau_irq_unregister(dev, 12);
kfree(pgraph);
}
int
nv50_graph_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_graph_engine *pgraph;
struct nouveau_grctx ctx = {};
int ret;
pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
if (!pgraph)
return -ENOMEM;
ctx.dev = dev;
ctx.mode = NOUVEAU_GRCTX_PROG;
ctx.data = pgraph->ctxprog;
ctx.ctxprog_max = ARRAY_SIZE(pgraph->ctxprog);
ret = nv50_grctx_init(&ctx);
if (ret) {
NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
kfree(pgraph);
return ret;
}
pgraph->grctx_size = ctx.ctxvals_pos * 4;
pgraph->ctxprog_size = ctx.ctxprog_len;
pgraph->base.destroy = nv50_graph_destroy;
pgraph->base.init = nv50_graph_init;
pgraph->base.fini = nv50_graph_fini;
pgraph->base.context_new = nv50_graph_context_new;
pgraph->base.context_del = nv50_graph_context_del;
pgraph->base.object_new = nv50_graph_object_new;
if (dev_priv->chipset == 0x50 || dev_priv->chipset == 0xac)
pgraph->base.tlb_flush = nv50_graph_tlb_flush;
else
pgraph->base.tlb_flush = nv84_graph_tlb_flush;
nouveau_irq_register(dev, 12, nv50_graph_isr);
/* NVSW really doesn't live here... */
NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
NVOBJ_CLASS(dev, 0x0030, GR); /* null */
NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
/* tesla */
if (dev_priv->chipset == 0x50)
NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
else
if (dev_priv->chipset < 0xa0)
NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
else {
switch (dev_priv->chipset) {
case 0xa0:
case 0xaa:
case 0xac:
NVOBJ_CLASS(dev, 0x8397, GR);
break;
case 0xa3:
case 0xa5:
case 0xa8:
NVOBJ_CLASS(dev, 0x8597, GR);
break;
case 0xaf:
NVOBJ_CLASS(dev, 0x8697, GR);
break;
}
}
/* compute */
NVOBJ_CLASS(dev, 0x50c0, GR);
if (dev_priv->chipset > 0xa0 &&
dev_priv->chipset != 0xaa &&
dev_priv->chipset != 0xac)
NVOBJ_CLASS(dev, 0x85c0, GR);
return 0;
}
| gpl-2.0 |
dekkyy1/3.1.1_kernel | arch/um/sys-x86_64/ptrace.c | 390 | 4558 | /*
* Copyright 2003 PathScale, Inc.
* Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*
* Licensed under the GPL
*/
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/errno.h>
#define __FRAME_OFFSETS
#include <asm/ptrace.h>
#include <asm/uaccess.h>
/*
* determines which flags the user has access to.
* 1 = access, 0 = no access
*/
#define FLAG_MASK 0x44dd5UL
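/* 0x44dd5 covers CF, PF, AF, ZF, SF, TF, DF, OF, NT and AC; notably
* IF and the IOPL bits stay under kernel control. */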
int putreg(struct task_struct *child, int regno, unsigned long value)
{
unsigned long tmp;
#ifdef TIF_IA32
/*
* Some code in the 64bit emulation may not be 64bit clean.
* Don't take any chances.
*/
if (test_tsk_thread_flag(child, TIF_IA32))
value &= 0xffffffff;
#endif
switch (regno) {
case FS:
case GS:
case DS:
case ES:
case SS:
case CS:
if (value && (value & 3) != 3)
return -EIO;
value &= 0xffff;
break;
case FS_BASE:
case GS_BASE:
if (!((value >> 48) == 0 || (value >> 48) == 0xffff))
return -EIO;
break;
case EFLAGS:
value &= FLAG_MASK;
tmp = PT_REGS_EFLAGS(&child->thread.regs) & ~FLAG_MASK;
value |= tmp;
break;
}
PT_REGS_SET(&child->thread.regs, regno, value);
return 0;
}
int poke_user(struct task_struct *child, long addr, long data)
{
if ((addr & 3) || addr < 0)
return -EIO;
if (addr < MAX_REG_OFFSET)
return putreg(child, addr, data);
else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
(addr <= offsetof(struct user, u_debugreg[7]))) {
addr -= offsetof(struct user, u_debugreg[0]);
addr = addr >> 2;
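/* Debug registers 4 and 5 are reserved aliases of 6 and 7, so
* refuse to write them. */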
if ((addr == 4) || (addr == 5))
return -EIO;
child->thread.arch.debugregs[addr] = data;
return 0;
}
return -EIO;
}
unsigned long getreg(struct task_struct *child, int regno)
{
unsigned long retval = ~0UL;
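/* retval doubles as a mask: the segment-register cases narrow it to
* 16 bits and then fall through, so the same PT_REG() read serves
* every register. */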
switch (regno) {
case FS:
case GS:
case DS:
case ES:
case SS:
case CS:
retval = 0xffff;
/* fall through */
default:
retval &= PT_REG(&child->thread.regs, regno);
#ifdef TIF_IA32
if (test_tsk_thread_flag(child, TIF_IA32))
retval &= 0xffffffff;
#endif
}
return retval;
}
int peek_user(struct task_struct *child, long addr, long data)
{
/* read the word at location addr in the USER area. */
unsigned long tmp;
if ((addr & 3) || addr < 0)
return -EIO;
tmp = 0; /* Default return condition */
if (addr < MAX_REG_OFFSET)
tmp = getreg(child, addr);
else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
(addr <= offsetof(struct user, u_debugreg[7]))) {
addr -= offsetof(struct user, u_debugreg[0]);
addr = addr >> 2;
tmp = child->thread.arch.debugregs[addr];
}
return put_user(tmp, (unsigned long *) data);
}
/* XXX Mostly copied from sys-i386 */
int is_syscall(unsigned long addr)
{
unsigned short instr;
int n;
n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
if (n) {
/*
* access_process_vm() grants access to vsyscall and stub,
* while copy_from_user doesn't. Maybe access_process_vm is
* slow, but that doesn't matter, since it will be called only
* in case of singlestepping, if copy_from_user failed.
*/
n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
if (n != sizeof(instr)) {
printk("is_syscall : failed to read instruction from "
"0x%lx\n", addr);
return 1;
}
}
/* "syscall" (0f 05), read as a little-endian u16 */
return instr == 0x050f;
}
static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
{
int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
long fpregs[HOST_FP_SIZE];
BUG_ON(sizeof(*buf) != sizeof(fpregs));
err = save_fp_registers(userspace_pid[cpu], fpregs);
if (err)
return err;
n = copy_to_user(buf, fpregs, sizeof(fpregs));
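/* copy_to_user() returns the number of bytes it could NOT copy, so
* any nonzero result means a faulting user buffer. */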
if (n > 0)
return -EFAULT;
return n;
}
static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
{
int n, cpu = ((struct thread_info *) child->stack)->cpu;
long fpregs[HOST_FP_SIZE];
BUG_ON(sizeof(*buf) != sizeof(fpregs));
n = copy_from_user(fpregs, buf, sizeof(fpregs));
if (n > 0)
return -EFAULT;
return restore_fp_registers(userspace_pid[cpu], fpregs);
}
long subarch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret = -EIO;
void __user *datap = (void __user *) data;
switch (request) {
case PTRACE_GETFPREGS: /* Get the child FPU state. */
ret = get_fpregs(datap, child);
break;
case PTRACE_SETFPREGS: /* Set the child FPU state. */
ret = set_fpregs(datap, child);
break;
case PTRACE_ARCH_PRCTL:
/* XXX Calls ptrace on the host - needs some SMP thinking */
ret = arch_prctl(child, data, (void __user *) addr);
break;
}
return ret;
}
| gpl-2.0 |
smaeul/kernel_samsung_tuna | net/sctp/sm_statefuns.c | 646 | 197491 | /* SCTP kernel implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2002 Intel Corp.
* Copyright (c) 2002 Nokia Corp.
*
* This is part of the SCTP Linux Kernel Implementation.
*
* These are the state functions for the state machine.
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Mathew Kotowsky <kotowsky@sctp.org>
* Sridhar Samudrala <samudrala@us.ibm.com>
* Jon Grimm <jgrimm@us.ibm.com>
* Hui Huang <hui.huang@nokia.com>
* Dajiang Zhang <dajiang.zhang@nokia.com>
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Kevin Gao <kevin.gao@intel.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/inet_ecn.h>
#include <linux/skbuff.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/structs.h>
static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
const void *payload,
size_t paylen);
static int sctp_eat_data(const struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands);
static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc,
const struct sctp_chunk *chunk);
static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_chunk *err_chunk);
static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
__be16 error, int sk_err,
const struct sctp_association *asoc,
struct sctp_transport *transport);
static sctp_disposition_t sctp_sf_abort_violation(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
void *arg,
sctp_cmd_seq_t *commands,
const __u8 *payload,
const size_t paylen);
static sctp_disposition_t sctp_sf_violation_chunklen(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
static sctp_disposition_t sctp_sf_violation_paramlen(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg, void *ext,
sctp_cmd_seq_t *commands);
static sctp_disposition_t sctp_sf_violation_ctsn(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
static sctp_disposition_t sctp_sf_violation_chunk(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
static sctp_ierror_t sctp_sf_authenticate(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
struct sctp_chunk *chunk);
static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
/* Small helper function that checks whether the chunk is of the
* appropriate length. The 'required_length' argument is set to the
* size of the specific chunk type we are testing.
* Return Values: 1 = Valid length
* 0 = Invalid length
*
*/
static inline int
sctp_chunk_length_valid(struct sctp_chunk *chunk,
__u16 required_length)
{
__u16 chunk_length = ntohs(chunk->chunk_hdr->length);
if (unlikely(chunk_length < required_length))
return 0;
return 1;
}
/**********************************************************
* These are the state functions for handling chunk events.
**********************************************************/
/*
* Process the final SHUTDOWN COMPLETE.
*
* Section: 4 (C) (diagram), 9.2
* Upon reception of the SHUTDOWN COMPLETE chunk the endpoint will verify
* that it is in SHUTDOWN-ACK-SENT state, if it is not the chunk should be
* discarded. If the endpoint is in the SHUTDOWN-ACK-SENT state the endpoint
* should stop the T2-shutdown timer and remove all knowledge of the
* association (and thus the association enters the CLOSED state).
*
* Verification Tag: 8.5.1(C), sctpimpguide 2.41.
* C) Rules for packet carrying SHUTDOWN COMPLETE:
* ...
* - The receiver of a SHUTDOWN COMPLETE shall accept the packet
* if the Verification Tag field of the packet matches its own tag and
* the T bit is not set
* OR
* it is set to its peer's tag and the T bit is set in the Chunk
* Flags.
* Otherwise, the receiver MUST silently discard the packet
* and take no further action. An endpoint MUST ignore the
* SHUTDOWN COMPLETE if it is not in the SHUTDOWN-ACK-SENT state.
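*
* Concretely (hypothetical tag values): with my_vtag 0x1111 and
* peer_vtag 0x2222, the packet is accepted if it carries vtag 0x1111
* with the T bit clear, or vtag 0x2222 with the T bit set; this is
* exactly the check sctp_vtag_verify_either() applies below.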
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_ulpevent *ev;
if (!sctp_vtag_verify_either(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* RFC 2960 6.10 Bundling
*
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
*/
if (!chunk->singleton)
return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
/* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
/* RFC 2960 10.2 SCTP-to-ULP
*
* H) SHUTDOWN COMPLETE notification
*
* When SCTP completes the shutdown procedures (section 9.2) this
* notification is passed to the upper layer.
*/
ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP,
0, 0, 0, NULL, GFP_ATOMIC);
if (ev)
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ev));
/* Upon reception of the SHUTDOWN COMPLETE chunk the endpoint
* will verify that it is in SHUTDOWN-ACK-SENT state, if it is
* not the chunk should be discarded. If the endpoint is in
* the SHUTDOWN-ACK-SENT state the endpoint should stop the
* T2-shutdown timer and remove all knowledge of the
* association (and thus the association enters the CLOSED
* state).
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
}
/*
* Respond to a normal INIT chunk.
* We are the side that is being asked for an association.
*
* Section: 5.1 Normal Establishment of an Association, B
* B) "Z" shall respond immediately with an INIT ACK chunk. The
* destination IP address of the INIT ACK MUST be set to the source
* IP address of the INIT to which this INIT ACK is responding. In
* the response, besides filling in other parameters, "Z" must set the
* Verification Tag field to Tag_A, and also provide its own
* Verification Tag (Tag_Z) in the Initiate Tag field.
*
* Verification Tag: Must be 0.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_chunk *repl;
struct sctp_association *new_asoc;
struct sctp_chunk *err_chunk;
struct sctp_packet *packet;
sctp_unrecognized_param_t *unk_param;
int len;
/* 6.10 Bundling
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
*
* IG Section 2.11.2
* Furthermore, we require that the receiver of an INIT chunk MUST
* enforce these rules by silently discarding an arriving packet
* with an INIT chunk that is bundled with other chunks.
*/
if (!chunk->singleton)
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* If the packet is an OOTB packet which is temporarily on the
* control endpoint, respond with an ABORT.
*/
if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) {
SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
}
/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
* Tag.
*/
if (chunk->sctp_hdr->vtag != 0)
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
/* Make sure that the INIT chunk has a valid length.
* Normally, this would cause an ABORT with a Protocol Violation
* error, but since we don't have an association, we'll
* just discard the packet.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* If the INIT is coming toward a closing socket, we'll send back
* an ABORT. Essentially, this catches the race of the INIT being
* backlogged to the socket at the same time as the user issues close().
* Since the socket and all its associations are going away, we
* can treat this as OOTB.
*/
if (sctp_sstate(ep->base.sk, CLOSING))
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
/* Verify the INIT chunk before processing it. */
err_chunk = NULL;
if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
(sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
&err_chunk)) {
/* This chunk contains fatal error. It is to be discarded.
* Send an ABORT, with causes if there is any.
*/
if (err_chunk) {
packet = sctp_abort_pkt_new(ep, asoc, arg,
(__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t),
ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t));
sctp_chunk_free(err_chunk);
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
return SCTP_DISPOSITION_CONSUME;
} else {
return SCTP_DISPOSITION_NOMEM;
}
} else {
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
commands);
}
}
/* Grab the INIT header. */
chunk->subh.init_hdr = (sctp_inithdr_t *)chunk->skb->data;
/* Tag the variable length parameters. */
chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t));
new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC);
if (!new_asoc)
goto nomem;
if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
sctp_scope(sctp_source(chunk)),
GFP_ATOMIC) < 0)
goto nomem_init;
/* The call, sctp_process_init(), can fail on memory allocation. */
if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
(sctp_init_chunk_t *)chunk->chunk_hdr,
GFP_ATOMIC))
goto nomem_init;
/* B) "Z" shall respond immediately with an INIT ACK chunk. */
/* If there are errors that need to be reported for unknown parameters,
* make sure to reserve enough room in the INIT ACK for them.
*/
len = 0;
if (err_chunk)
len = ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t);
repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
if (!repl)
goto nomem_init;
/* If there are errors that need to be reported for unknown parameters,
* include them in the outgoing INIT ACK as "Unrecognized parameter"
* parameter.
*/
if (err_chunk) {
/* Get the "Unrecognized parameter" parameter(s) out of the
* ERROR chunk generated by sctp_verify_init(). Since the
* error cause code for "unknown parameter" and the
* "Unrecognized parameter" type is the same, we can
* construct the parameters in INIT ACK by copying the
* ERROR causes over.
*/
unk_param = (sctp_unrecognized_param_t *)
((__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t));
/* Replace the cause code with the "Unrecognized parameter"
* parameter type.
*/
sctp_addto_chunk(repl, len, unk_param);
sctp_chunk_free(err_chunk);
}
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
/*
* Note: After sending out INIT ACK with the State Cookie parameter,
* "Z" MUST NOT allocate any resources, nor keep any states for the
* new association. Otherwise, "Z" will be vulnerable to resource
* attacks.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
nomem_init:
sctp_association_free(new_asoc);
nomem:
if (err_chunk)
sctp_chunk_free(err_chunk);
return SCTP_DISPOSITION_NOMEM;
}
/*
* Respond to a normal INIT ACK chunk.
* We are the side that is initiating the association.
*
* Section: 5.1 Normal Establishment of an Association, C
* C) Upon reception of the INIT ACK from "Z", "A" shall stop the T1-init
* timer and leave COOKIE-WAIT state. "A" shall then send the State
* Cookie received in the INIT ACK chunk in a COOKIE ECHO chunk, start
* the T1-cookie timer, and enter the COOKIE-ECHOED state.
*
* Note: The COOKIE ECHO chunk can be bundled with any pending outbound
* DATA chunks, but it MUST be the first chunk in the packet and
* until the COOKIE ACK is returned the sender MUST NOT send any
* other packets to the peer.
*
* Verification Tag: 3.3.3
* If the value of the Initiate Tag in a received INIT ACK chunk is
* found to be 0, the receiver MUST treat it as an error and close the
* association by transmitting an ABORT.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_init_chunk_t *initchunk;
struct sctp_chunk *err_chunk;
struct sctp_packet *packet;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* 6.10 Bundling
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
*/
if (!chunk->singleton)
return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
/* Make sure that the INIT-ACK chunk has a valid length */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
/* Grab the INIT header. */
chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
/* Verify the INIT chunk before processing it. */
err_chunk = NULL;
if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
(sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
&err_chunk)) {
sctp_error_t error = SCTP_ERROR_NO_RESOURCE;
/* This chunk contains fatal error. It is to be discarded.
* Send an ABORT, with causes. If there are no causes,
* then there wasn't enough memory. Just terminate
* the association.
*/
if (err_chunk) {
packet = sctp_abort_pkt_new(ep, asoc, arg,
(__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t),
ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t));
sctp_chunk_free(err_chunk);
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
error = SCTP_ERROR_INV_PARAM;
}
}
/* SCTP-AUTH, Section 6.3:
* It should be noted that if the receiver wants to tear
* down an association in an authenticated way only, the
* handling of malformed packets should not result in
* tearing down the association.
*
* This means that if we only want to abort associations
* in an authenticated way (i.e AUTH+ABORT), then we
* can't destroy this association just because the packet
* was malformed.
*/
if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED,
asoc, chunk->transport);
}
/* Tag the variable length parameters. Note that we never
* convert the parameters in an INIT chunk.
*/
chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t));
initchunk = (sctp_init_chunk_t *) chunk->chunk_hdr;
sctp_add_cmd_sf(commands, SCTP_CMD_PEER_INIT,
SCTP_PEER_INIT(initchunk));
/* Reset init error count upon receipt of INIT-ACK. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL());
/* 5.1 C) "A" shall stop the T1-init timer and leave
* COOKIE-WAIT state. "A" shall then ... start the T1-cookie
* timer, and enter the COOKIE-ECHOED state.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_COOKIE_ECHOED));
/* SCTP-AUTH: generate the association shared keys so that
* we can potentially sign the COOKIE-ECHO.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_SHKEY, SCTP_NULL());
/* 5.1 C) "A" shall then send the State Cookie received in the
* INIT ACK chunk in a COOKIE ECHO chunk, ...
*/
/* If there are any errors to report, send the ERROR chunk generated
* for unknown parameters as well.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_COOKIE_ECHO,
SCTP_CHUNK(err_chunk));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Respond to a normal COOKIE ECHO chunk.
* We are the side that is being asked for an association.
*
* Section: 5.1 Normal Establishment of an Association, D
* D) Upon reception of the COOKIE ECHO chunk, Endpoint "Z" will reply
* with a COOKIE ACK chunk after building a TCB and moving to
* the ESTABLISHED state. A COOKIE ACK chunk may be bundled with
* any pending DATA chunks (and/or SACK chunks), but the COOKIE ACK
* chunk MUST be the first chunk in the packet.
*
* IMPLEMENTATION NOTE: An implementation may choose to send the
* Communication Up notification to the SCTP user upon reception
* of a valid COOKIE ECHO chunk.
*
* Verification Tag: 8.5.1 Exceptions in Verification Tag Rules
* D) Rules for packet carrying a COOKIE ECHO
*
* - When sending a COOKIE ECHO, the endpoint MUST use the value of the
* Initial Tag received in the INIT ACK.
*
* - The receiver of a COOKIE ECHO follows the procedures in Section 5.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_association *new_asoc;
sctp_init_chunk_t *peer_init;
struct sctp_chunk *repl;
struct sctp_ulpevent *ev, *ai_ev = NULL;
int error = 0;
struct sctp_chunk *err_chk_p;
struct sock *sk;
/* If the packet is an OOTB packet which is temporarily on the
* control endpoint, respond with an ABORT.
*/
if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) {
SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
}
/* Make sure that the COOKIE_ECHO chunk has a valid length.
* In this case, we check that we have enough for at least a
* chunk header. More detailed verification is done
* in sctp_unpack_cookie().
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* If the endpoint is not listening or if the number of associations
* on the TCP-style socket exceed the max backlog, respond with an
* ABORT.
*/
sk = ep->base.sk;
if (!sctp_sstate(sk, LISTENING) ||
(sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
/* "Decode" the chunk. We have no optional parameters so we
* are in good shape.
*/
chunk->subh.cookie_hdr =
(struct sctp_signed_cookie *)chunk->skb->data;
if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t)))
goto nomem;
/* 5.1 D) Upon reception of the COOKIE ECHO chunk, Endpoint
* "Z" will reply with a COOKIE ACK chunk after building a TCB
* and moving to the ESTABLISHED state.
*/
new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error,
&err_chk_p);
/* FIXME:
* If the re-build failed, what is the proper error path
* from here?
*
* [We should abort the association. --piggy]
*/
if (!new_asoc) {
/* FIXME: Several errors are possible. A bad cookie should
* be silently discarded, but think about logging it too.
*/
switch (error) {
case -SCTP_IERROR_NOMEM:
goto nomem;
case -SCTP_IERROR_STALE_COOKIE:
sctp_send_stale_cookie_err(ep, asoc, chunk, commands,
err_chk_p);
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
case -SCTP_IERROR_BAD_SIG:
default:
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
}
/* Delay state machine commands until later.
*
* Re-building the bind address for the association was already
* done in sctp_unpack_cookie().
*/
/* This is a brand-new association, so these are not yet side
* effects--it is safe to run them here.
*/
peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
if (!sctp_process_init(new_asoc, chunk,
&chunk->subh.cookie_hdr->c.peer_addr,
peer_init, GFP_ATOMIC))
goto nomem_init;
/* SCTP-AUTH: Now that we've populated the required fields in
* sctp_process_init(), set up the association shared keys as
* necessary so that we can potentially authenticate the ACK
*/
error = sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC);
if (error)
goto nomem_init;
/* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
* is supposed to be authenticated and we have to do delayed
* authentication. We've just recreated the association using
* the information in the cookie and now it's much easier to
* do the authentication.
*/
if (chunk->auth_chunk) {
struct sctp_chunk auth;
sctp_ierror_t ret;
/* set-up our fake chunk so that we can process it */
auth.skb = chunk->auth_chunk;
auth.asoc = chunk->asoc;
auth.sctp_hdr = chunk->sctp_hdr;
auth.chunk_hdr = (sctp_chunkhdr_t *)skb_push(chunk->auth_chunk,
sizeof(sctp_chunkhdr_t));
skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t));
auth.transport = chunk->transport;
ret = sctp_sf_authenticate(ep, new_asoc, type, &auth);
/* We can now safely free the auth_chunk clone */
kfree_skb(chunk->auth_chunk);
if (ret != SCTP_IERROR_NO_ERROR) {
sctp_association_free(new_asoc);
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
}
repl = sctp_make_cookie_ack(new_asoc, chunk);
if (!repl)
goto nomem_init;
/* RFC 2960 5.1 Normal Establishment of an Association
*
* D) IMPLEMENTATION NOTE: An implementation may choose to
* send the Communication Up notification to the SCTP user
* upon reception of a valid COOKIE ECHO chunk.
*/
ev = sctp_ulpevent_make_assoc_change(new_asoc, 0, SCTP_COMM_UP, 0,
new_asoc->c.sinit_num_ostreams,
new_asoc->c.sinit_max_instreams,
NULL, GFP_ATOMIC);
if (!ev)
goto nomem_ev;
/* Sockets API Draft Section 5.3.1.6
* When a peer sends an Adaptation Layer Indication parameter, SCTP
* delivers this notification to inform the application of the
* peer's requested adaptation layer.
*/
if (new_asoc->peer.adaptation_ind) {
ai_ev = sctp_ulpevent_make_adaptation_indication(new_asoc,
GFP_ATOMIC);
if (!ai_ev)
goto nomem_aiev;
}
/* Add all the state machine commands now since we've created
* everything. This way we don't introduce memory corruptions
* during side-effect processing and correctly count established
* associations.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
SCTP_INC_STATS(SCTP_MIB_PASSIVEESTABS);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
if (new_asoc->autoclose)
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
/* This will send the COOKIE ACK */
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
/* Queue the ASSOC_CHANGE event */
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
/* Send up the Adaptation Layer Indication event */
if (ai_ev)
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ai_ev));
return SCTP_DISPOSITION_CONSUME;
nomem_aiev:
sctp_ulpevent_free(ev);
nomem_ev:
sctp_chunk_free(repl);
nomem_init:
sctp_association_free(new_asoc);
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Respond to a normal COOKIE ACK chunk.
* We are the side that is being asked for an association.
*
* RFC 2960 5.1 Normal Establishment of an Association
*
* E) Upon reception of the COOKIE ACK, endpoint "A" will move from the
* COOKIE-ECHOED state to the ESTABLISHED state, stopping the T1-cookie
* timer. It may also notify its ULP about the successful
* establishment of the association with a Communication Up
* notification (see Section 10).
*
* Verification Tag:
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_ulpevent *ev;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* Verify that the chunk length for the COOKIE-ACK is OK.
* If we don't do this, any bundled chunks may be junked.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
/* Reset init error count upon receipt of COOKIE-ACK,
* to avoid problems with the management of this
* counter in stale cookie situations when a transition back
* from the COOKIE-ECHOED state to the COOKIE-WAIT
* state is performed.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL());
/* RFC 2960 5.1 Normal Establishment of an Association
*
* E) Upon reception of the COOKIE ACK, endpoint "A" will move
* from the COOKIE-ECHOED state to the ESTABLISHED state,
* stopping the T1-cookie timer.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
SCTP_INC_STATS(SCTP_MIB_ACTIVEESTABS);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
if (asoc->autoclose)
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
/* It may also notify its ULP about the successful
* establishment of the association with a Communication Up
* notification (see Section 10).
*/
ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP,
0, asoc->c.sinit_num_ostreams,
asoc->c.sinit_max_instreams,
NULL, GFP_ATOMIC);
if (!ev)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
/* Sockets API Draft Section 5.3.1.6
* When a peer sends an Adaptation Layer Indication parameter, SCTP
* delivers this notification to inform the application of the
* peer's requested adaptation layer.
*/
if (asoc->peer.adaptation_ind) {
ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
if (!ev)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ev));
}
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/* Generate and sendout a heartbeat packet. */
static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_transport *transport = (struct sctp_transport *) arg;
struct sctp_chunk *reply;
/* Send a heartbeat to our peer. */
reply = sctp_make_heartbeat(asoc, transport);
if (!reply)
return SCTP_DISPOSITION_NOMEM;
/* Set rto_pending indicating that an RTT measurement
* is started with this heartbeat chunk.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_RTO_PENDING,
SCTP_TRANSPORT(transport));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
}
/* Generate a HEARTBEAT packet on the given transport. */
sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_transport *transport = (struct sctp_transport *) arg;
if (asoc->overall_error_count >= asoc->max_retrans) {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_DELETE_TCB;
}
/* Section 3.3.5.
* The Sender-specific Heartbeat Info field should normally include
* information about the sender's current time when this HEARTBEAT
* chunk is sent and the destination transport address to which this
* HEARTBEAT is sent (see Section 8.3).
*/
if (transport->param_flags & SPP_HB_ENABLE) {
if (SCTP_DISPOSITION_NOMEM ==
sctp_sf_heartbeat(ep, asoc, type, arg,
commands))
return SCTP_DISPOSITION_NOMEM;
/* Set transport error counter and association error counter
* when sending heartbeat.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
SCTP_TRANSPORT(transport));
}
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_IDLE,
SCTP_TRANSPORT(transport));
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE,
SCTP_TRANSPORT(transport));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Process a heartbeat request.
*
* Section: 8.3 Path Heartbeat
* The receiver of the HEARTBEAT should immediately respond with a
* HEARTBEAT ACK that contains the Heartbeat Information field copied
* from the received HEARTBEAT chunk.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
* When receiving an SCTP packet, the endpoint MUST ensure that the
* value in the Verification Tag field of the received SCTP packet
* matches its own Tag. If the received Verification Tag value does not
* match the receiver's own tag value, the receiver shall silently
* discard the packet and shall not process it any further except for
* those cases listed in Section 8.5.1 below.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_beat_8_3(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_chunk *reply;
size_t paylen = 0;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* Make sure that the HEARTBEAT chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
/* 8.3 The receiver of the HEARTBEAT should immediately
* respond with a HEARTBEAT ACK that contains the Heartbeat
* Information field copied from the received HEARTBEAT chunk.
*/
chunk->subh.hb_hdr = (sctp_heartbeathdr_t *) chunk->skb->data;
paylen = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
if (!pskb_pull(chunk->skb, paylen))
goto nomem;
reply = sctp_make_heartbeat_ack(asoc, chunk,
chunk->subh.hb_hdr, paylen);
if (!reply)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Process the returning HEARTBEAT ACK.
*
* Section: 8.3 Path Heartbeat
* Upon the receipt of the HEARTBEAT ACK, the sender of the HEARTBEAT
* should clear the error counter of the destination transport
* address to which the HEARTBEAT was sent, and mark the destination
* transport address as active if it is not so marked. The endpoint may
* optionally report to the upper layer when an inactive destination
* address is marked as active due to the reception of the latest
* HEARTBEAT ACK. The receiver of the HEARTBEAT ACK must also
* clear the association overall error count as well (as defined
* in section 8.1).
*
* The receiver of the HEARTBEAT ACK should also perform an RTT
* measurement for that destination transport address using the time
* value carried in the HEARTBEAT ACK chunk.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
union sctp_addr from_addr;
struct sctp_transport *link;
sctp_sender_hb_info_t *hbinfo;
unsigned long max_interval;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* Make sure that the HEARTBEAT-ACK chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t) +
sizeof(sctp_sender_hb_info_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
/* Make sure that the length of the parameter is what we expect */
if (ntohs(hbinfo->param_hdr.length) !=
sizeof(sctp_sender_hb_info_t)) {
return SCTP_DISPOSITION_DISCARD;
}
from_addr = hbinfo->daddr;
link = sctp_assoc_lookup_paddr(asoc, &from_addr);
/* This should never happen, but let's log it if it does. */
if (unlikely(!link)) {
if (from_addr.sa.sa_family == AF_INET6) {
if (net_ratelimit())
pr_warn("%s association %p could not find address %pI6\n",
__func__,
asoc,
&from_addr.v6.sin6_addr);
} else {
if (net_ratelimit())
pr_warn("%s association %p could not find address %pI4\n",
__func__,
asoc,
&from_addr.v4.sin_addr.s_addr);
}
return SCTP_DISPOSITION_DISCARD;
}
/* Validate the 64-bit random nonce. */
if (hbinfo->hb_nonce != link->hb_nonce)
return SCTP_DISPOSITION_DISCARD;
max_interval = link->hbinterval + link->rto;
/* Check if the timestamp looks valid. */
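/* "Valid" means sent_at is not in the future and the ACK arrived
* within hbinterval + rto of being sent; anything else cannot match
* an outstanding heartbeat and is dropped as stale. */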
if (time_after(hbinfo->sent_at, jiffies) ||
time_after(jiffies, hbinfo->sent_at + max_interval)) {
SCTP_DEBUG_PRINTK("%s: HEARTBEAT ACK with invalid timestamp "
"received for transport: %p\n",
__func__, link);
return SCTP_DISPOSITION_DISCARD;
}
/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of
* the HEARTBEAT should clear the error counter of the
* destination transport address to which the HEARTBEAT was
* sent and mark the destination transport address as active if
* it is not so marked.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_ON, SCTP_TRANSPORT(link));
return SCTP_DISPOSITION_CONSUME;
}
/* Helper function to send out an abort for the restart
* condition.
*/
static int sctp_sf_send_restart_abort(union sctp_addr *ssa,
struct sctp_chunk *init,
sctp_cmd_seq_t *commands)
{
int len;
struct sctp_packet *pkt;
union sctp_addr_param *addrparm;
struct sctp_errhdr *errhdr;
struct sctp_endpoint *ep;
char buffer[sizeof(struct sctp_errhdr)+sizeof(union sctp_addr_param)];
struct sctp_af *af = sctp_get_af_specific(ssa->v4.sin_family);
/* Build the error on the stack. We are way too malloc crazy
* throughout the code today.
*/
errhdr = (struct sctp_errhdr *)buffer;
addrparm = (union sctp_addr_param *)errhdr->variable;
/* Copy into a parm format. */
len = af->to_addr_param(ssa, addrparm);
len += sizeof(sctp_errhdr_t);
errhdr->cause = SCTP_ERROR_RESTART;
errhdr->length = htons(len);
/* Assign to the control socket. */
ep = sctp_sk((sctp_get_ctl_sock()))->ep;
/* Association is NULL since this may be a restart attack and we
* want to send back the attacker's vtag.
*/
pkt = sctp_abort_pkt_new(ep, NULL, init, errhdr, len);
if (!pkt)
goto out;
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt));
SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
/* Discard the rest of the inbound packet. */
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
out:
/* Even if there is no memory, treat as a failure so
* the packet will get dropped.
*/
return 0;
}
static bool list_has_sctp_addr(const struct list_head *list,
union sctp_addr *ipaddr)
{
struct sctp_transport *addr;
list_for_each_entry(addr, list, transports) {
if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr))
return true;
}
return false;
}
/* A restart is occurring, check to make sure no new addresses
* are being added as we may be under a takeover attack.
*/
static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
const struct sctp_association *asoc,
struct sctp_chunk *init,
sctp_cmd_seq_t *commands)
{
struct sctp_transport *new_addr;
int ret = 1;
/* Implementor's Guide - Section 5.2.2
* ...
* Before responding the endpoint MUST check to see if the
* unexpected INIT adds new addresses to the association. If new
* addresses are added to the association, the endpoint MUST respond
* with an ABORT..
*/
/* Search through all current addresses and make sure
* we aren't adding any new ones.
*/
list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list,
transports) {
if (!list_has_sctp_addr(&asoc->peer.transport_addr_list,
&new_addr->ipaddr)) {
sctp_sf_send_restart_abort(&new_addr->ipaddr, init,
commands);
ret = 0;
break;
}
}
/* Return success if all addresses were found. */
return ret;
}
/* Populate the verification/tie tags based on overlapping INIT
* scenario.
*
* Note: Do not use in CLOSED or SHUTDOWN-ACK-SENT state.
*/
static void sctp_tietags_populate(struct sctp_association *new_asoc,
const struct sctp_association *asoc)
{
switch (asoc->state) {
/* 5.2.1 INIT received in COOKIE-WAIT or COOKIE-ECHOED State */
case SCTP_STATE_COOKIE_WAIT:
new_asoc->c.my_vtag = asoc->c.my_vtag;
new_asoc->c.my_ttag = asoc->c.my_vtag;
new_asoc->c.peer_ttag = 0;
break;
case SCTP_STATE_COOKIE_ECHOED:
new_asoc->c.my_vtag = asoc->c.my_vtag;
new_asoc->c.my_ttag = asoc->c.my_vtag;
new_asoc->c.peer_ttag = asoc->c.peer_vtag;
break;
/* 5.2.2 Unexpected INIT in States Other than CLOSED, COOKIE-ECHOED,
* COOKIE-WAIT and SHUTDOWN-ACK-SENT
*/
default:
new_asoc->c.my_ttag = asoc->c.my_vtag;
new_asoc->c.peer_ttag = asoc->c.peer_vtag;
break;
}
/* Other parameters for the endpoint SHOULD be copied from the
* existing parameters of the association (e.g. number of
* outbound streams) into the INIT ACK and cookie.
*/
new_asoc->rwnd = asoc->rwnd;
new_asoc->c.sinit_num_ostreams = asoc->c.sinit_num_ostreams;
new_asoc->c.sinit_max_instreams = asoc->c.sinit_max_instreams;
new_asoc->c.initial_tsn = asoc->c.initial_tsn;
}
/*
* Compare vtag/tietag values to determine unexpected COOKIE-ECHO
* handling action.
*
* RFC 2960 5.2.4 Handle a COOKIE ECHO when a TCB exists.
*
* Returns a value representing the action to be taken. These action values
* correspond to Action/Description values in RFC 2960, Table 2.
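*
* A worked example with hypothetical tags: if the existing association
* has my_vtag 0x1111 and peer_vtag 0x2222, and the unpacked cookie
* carries different vtags but tie-tags of 0x1111/0x2222, the first
* test below matches and we return 'A' -- the peer has restarted.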
*/
static char sctp_tietags_compare(struct sctp_association *new_asoc,
const struct sctp_association *asoc)
{
/* In this case, the peer may have restarted. */
if ((asoc->c.my_vtag != new_asoc->c.my_vtag) &&
(asoc->c.peer_vtag != new_asoc->c.peer_vtag) &&
(asoc->c.my_vtag == new_asoc->c.my_ttag) &&
(asoc->c.peer_vtag == new_asoc->c.peer_ttag))
return 'A';
/* Collision case B. */
if ((asoc->c.my_vtag == new_asoc->c.my_vtag) &&
((asoc->c.peer_vtag != new_asoc->c.peer_vtag) ||
(0 == asoc->c.peer_vtag))) {
return 'B';
}
/* Collision case D. */
if ((asoc->c.my_vtag == new_asoc->c.my_vtag) &&
(asoc->c.peer_vtag == new_asoc->c.peer_vtag))
return 'D';
/* Collision case C. */
if ((asoc->c.my_vtag != new_asoc->c.my_vtag) &&
(asoc->c.peer_vtag == new_asoc->c.peer_vtag) &&
(0 == new_asoc->c.my_ttag) &&
(0 == new_asoc->c.peer_ttag))
return 'C';
/* No match to any of the special cases; discard this packet. */
return 'E';
}
/* Common helper routine for both duplicate and simultaneous INIT
* chunk handling.
*/
static sctp_disposition_t sctp_sf_do_unexpected_init(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg, sctp_cmd_seq_t *commands)
{
sctp_disposition_t retval;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *repl;
struct sctp_association *new_asoc;
struct sctp_chunk *err_chunk;
struct sctp_packet *packet;
sctp_unrecognized_param_t *unk_param;
int len;
/* 6.10 Bundling
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
*
* IG Section 2.11.2
* Furthermore, we require that the receiver of an INIT chunk MUST
* enforce these rules by silently discarding an arriving packet
* with an INIT chunk that is bundled with other chunks.
*/
if (!chunk->singleton)
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
* Tag.
*/
if (chunk->sctp_hdr->vtag != 0)
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
/* Make sure that the INIT chunk has a valid length.
* In this case, we generate a protocol violation since we have
* an association established.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
/* Grab the INIT header. */
chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
/* Tag the variable length parameters. */
chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t));
/* Verify the INIT chunk before processing it. */
err_chunk = NULL;
if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
(sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
&err_chunk)) {
/* This chunk contains fatal error. It is to be discarded.
* Send an ABORT, with causes if there is any.
*/
if (err_chunk) {
packet = sctp_abort_pkt_new(ep, asoc, arg,
(__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t),
ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t));
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
retval = SCTP_DISPOSITION_CONSUME;
} else {
retval = SCTP_DISPOSITION_NOMEM;
}
goto cleanup;
} else {
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
commands);
}
}
/*
* Other parameters for the endpoint SHOULD be copied from the
* existing parameters of the association (e.g. number of
* outbound streams) into the INIT ACK and cookie.
* FIXME: We are copying parameters from the endpoint not the
* association.
*/
new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC);
if (!new_asoc)
goto nomem;
if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0)
goto nomem;
/* In the outbound INIT ACK the endpoint MUST copy its current
* Verification Tag and Peers Verification tag into a reserved
* place (local tie-tag and per tie-tag) within the state cookie.
*/
if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
(sctp_init_chunk_t *)chunk->chunk_hdr,
GFP_ATOMIC))
goto nomem;
/* Make sure no new addresses are being added during the
* restart. Do not do this check for COOKIE-WAIT state,
* since there are no peer addresses to check against.
* Upon return an ABORT will have been sent if needed.
*/
if (!sctp_state(asoc, COOKIE_WAIT)) {
if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk,
commands)) {
retval = SCTP_DISPOSITION_CONSUME;
goto nomem_retval;
}
}
sctp_tietags_populate(new_asoc, asoc);
/* B) "Z" shall respond immediately with an INIT ACK chunk. */
/* If there are errors that need to be reported for unknown parameters,
* make sure to reserve enough room in the INIT ACK for them.
*/
len = 0;
if (err_chunk) {
len = ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t);
}
repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
if (!repl)
goto nomem;
/* If there are errors that need to be reported for unknown parameters,
* include them in the outgoing INIT ACK as "Unrecognized parameter"
* parameter.
*/
if (err_chunk) {
/* Get the "Unrecognized parameter" parameter(s) out of the
* ERROR chunk generated by sctp_verify_init(). Since the
* error cause code for "unknown parameter" and the
* "Unrecognized parameter" type is the same, we can
* construct the parameters in INIT ACK by copying the
* ERROR causes over.
*/
unk_param = (sctp_unrecognized_param_t *)
((__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t));
/* Replace the cause code with the "Unrecognized parameter"
* parameter type.
*/
sctp_addto_chunk(repl, len, unk_param);
}
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
/*
* Note: After sending out INIT ACK with the State Cookie parameter,
* "Z" MUST NOT allocate any resources for this new association.
* Otherwise, "Z" will be vulnerable to resource attacks.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
retval = SCTP_DISPOSITION_CONSUME;
return retval;
nomem:
retval = SCTP_DISPOSITION_NOMEM;
nomem_retval:
if (new_asoc)
sctp_association_free(new_asoc);
cleanup:
if (err_chunk)
sctp_chunk_free(err_chunk);
return retval;
}
/*
* Handle simultaneous INIT.
* This means we started an INIT and then we got an INIT request from
* our peer.
*
* Section: 5.2.1 INIT received in COOKIE-WAIT or COOKIE-ECHOED State (Item B)
* This usually indicates an initialization collision, i.e., each
* endpoint is attempting, at about the same time, to establish an
* association with the other endpoint.
*
* Upon receipt of an INIT in the COOKIE-WAIT or COOKIE-ECHOED state, an
* endpoint MUST respond with an INIT ACK using the same parameters it
* sent in its original INIT chunk (including its Verification Tag,
* unchanged). These original parameters are combined with those from the
* newly received INIT chunk. The endpoint shall also generate a State
* Cookie with the INIT ACK. The endpoint uses the parameters sent in its
* INIT to calculate the State Cookie.
*
* After that, the endpoint MUST NOT change its state, the T1-init
* timer shall be left running and the corresponding TCB MUST NOT be
* destroyed. The normal procedures for handling State Cookies when
* a TCB exists will resolve the duplicate INITs to a single association.
*
* For an endpoint that is in the COOKIE-ECHOED state it MUST populate
* its Tie-Tags with the Tag information of itself and its peer (see
* section 5.2.2 for a description of the Tie-Tags).
*
* Verification Tag: Not explicit, but an INIT cannot have a valid
* verification tag, so we skip the check.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_2_1_siminit(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* Call the helper to do the real work for both simultaneous and
* duplicate INIT chunk handling.
*/
return sctp_sf_do_unexpected_init(ep, asoc, type, arg, commands);
}
/*
* Handle duplicated INIT messages. These are usually delayed
* retransmissions.
*
* Section: 5.2.2 Unexpected INIT in States Other than CLOSED,
* COOKIE-ECHOED and COOKIE-WAIT
*
* Unless otherwise stated, upon reception of an unexpected INIT for
* this association, the endpoint shall generate an INIT ACK with a
* State Cookie. In the outbound INIT ACK the endpoint MUST copy its
* current Verification Tag and peer's Verification Tag into a reserved
* place within the state cookie. We shall refer to these locations as
* the Peer's-Tie-Tag and the Local-Tie-Tag. The outbound SCTP packet
* containing this INIT ACK MUST carry a Verification Tag value equal to
* the Initiation Tag found in the unexpected INIT. And the INIT ACK
* MUST contain a new Initiation Tag (randomly generated see Section
* 5.3.1). Other parameters for the endpoint SHOULD be copied from the
* existing parameters of the association (e.g. number of outbound
* streams) into the INIT ACK and cookie.
*
* After sending out the INIT ACK, the endpoint shall take no further
* actions, i.e., the existing association, including its current state,
* and the corresponding TCB MUST NOT be changed.
*
* Note: Only when a TCB exists and the association is not in a COOKIE-
* WAIT state are the Tie-Tags populated. For a normal association INIT
* (i.e. the endpoint is in a COOKIE-WAIT state), the Tie-Tags MUST be
* set to 0 (indicating that no previous TCB existed). The INIT ACK and
* State Cookie are populated as specified in section 5.2.1.
*
* Verification Tag: Not specified, but an INIT has no way of knowing
* what the verification tag could be, so we ignore it.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* Call the helper to do the real work for both simultaneous and
* duplicate INIT chunk handling.
*/
return sctp_sf_do_unexpected_init(ep, asoc, type, arg, commands);
}
/*
* Unexpected INIT-ACK handler.
*
* Section 5.2.3
* If an INIT ACK is received by an endpoint in any state other than the
* COOKIE-WAIT state, the endpoint should discard the INIT ACK chunk.
* An unexpected INIT ACK usually indicates the processing of an old or
* duplicated INIT chunk.
*/
sctp_disposition_t sctp_sf_do_5_2_3_initack(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg, sctp_cmd_seq_t *commands)
{
/* Per the above section, we'll discard the chunk if we have a
* real endpoint. If this INIT-ACK arrived on the control socket's
* endpoint, it is out of the blue; treat it as such.
*/
if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
return sctp_sf_ootb(ep, asoc, type, arg, commands);
else
return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
}
/* Unexpected COOKIE-ECHO handler for peer restart (Table 2, action 'A')
*
* Section 5.2.4
* A) In this case, the peer may have restarted.
*/
static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_association *new_asoc)
{
sctp_init_chunk_t *peer_init;
struct sctp_ulpevent *ev;
struct sctp_chunk *repl;
struct sctp_chunk *err;
sctp_disposition_t disposition;
/* new_asoc is a brand-new association, so these are not yet
* side effects--it is safe to run them here.
*/
peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
GFP_ATOMIC))
goto nomem;
/* Make sure no new addresses are being added during the
* restart. Though this is a pretty complicated attack
* since you'd have to get inside the cookie.
*/
if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) {
return SCTP_DISPOSITION_CONSUME;
}
/* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
* the peer has restarted (Action A), it MUST NOT setup a new
* association but instead resend the SHUTDOWN ACK and send an ERROR
* chunk with a "Cookie Received while Shutting Down" error cause to
* its peer.
*/
if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) {
disposition = sctp_sf_do_9_2_reshutack(ep, asoc,
SCTP_ST_CHUNK(chunk->chunk_hdr->type),
chunk, commands);
if (SCTP_DISPOSITION_NOMEM == disposition)
goto nomem;
err = sctp_make_op_error(asoc, chunk,
SCTP_ERROR_COOKIE_IN_SHUTDOWN,
NULL, 0, 0);
if (err)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err));
return SCTP_DISPOSITION_CONSUME;
}
/* For now, stop pending T3-rtx and SACK timers, fail any unsent/unacked
* data. Consider the optional choice of resending this data.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_OUTQUEUE, SCTP_NULL());
/* Stop pending T4-rto timer, teardown ASCONF queue, ASCONF-ACK queue
* and ASCONF-ACK cache.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL());
repl = sctp_make_cookie_ack(new_asoc, chunk);
if (!repl)
goto nomem;
/* Report association restart to upper layer. */
ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0,
new_asoc->c.sinit_num_ostreams,
new_asoc->c.sinit_max_instreams,
NULL, GFP_ATOMIC);
if (!ev)
goto nomem_ev;
/* Update the content of current association. */
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
return SCTP_DISPOSITION_CONSUME;
nomem_ev:
sctp_chunk_free(repl);
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/* Unexpected COOKIE-ECHO handler for setup collision (Table 2, action 'B')
*
* Section 5.2.4
* B) In this case, both sides may be attempting to start an association
* at about the same time but the peer endpoint started its INIT
* after responding to the local endpoint's INIT.
*/
/* This case represents an initialization collision. */
static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_association *new_asoc)
{
sctp_init_chunk_t *peer_init;
struct sctp_chunk *repl;
/* new_asoc is a brand-new association, so these are not yet
* side effects--it is safe to run them here.
*/
peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
GFP_ATOMIC))
goto nomem;
/* Update the content of current association. */
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
repl = sctp_make_cookie_ack(new_asoc, chunk);
if (!repl)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
/* RFC 2960 5.1 Normal Establishment of an Association
*
* D) IMPLEMENTATION NOTE: An implementation may choose to
* send the Communication Up notification to the SCTP user
* upon reception of a valid COOKIE ECHO chunk.
*
* Sadly, this needs to be implemented as a side-effect, because
* we are not guaranteed to have set the association id of the real
* association and so these notifications need to be delayed until
* the association id is allocated.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_CHANGE, SCTP_U8(SCTP_COMM_UP));
/* Sockets API Draft Section 5.3.1.6
* When a peer sends an Adaptation Layer Indication parameter, SCTP
* delivers this notification to inform the application of the
* peer's requested adaptation layer.
*
* This also needs to be done as a side effect for the same reason as
* above.
*/
if (asoc->peer.adaptation_ind)
sctp_add_cmd_sf(commands, SCTP_CMD_ADAPTATION_IND, SCTP_NULL());
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/* Unexpected COOKIE-ECHO handler for setup collision (Table 2, action 'C')
*
* Section 5.2.4
* C) In this case, the local endpoint's cookie has arrived late.
* Before it arrived, the local endpoint sent an INIT and received an
* INIT-ACK and finally sent a COOKIE ECHO with the peer's same tag
* but a new tag of its own.
*/
/* This case represents an initialization collision. */
static sctp_disposition_t sctp_sf_do_dupcook_c(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_association *new_asoc)
{
/* The cookie should be silently discarded.
* The endpoint SHOULD NOT change states and should leave
* any timers running.
*/
return SCTP_DISPOSITION_DISCARD;
}
/* Unexpected COOKIE-ECHO handler lost chunk (Table 2, action 'D')
*
* Section 5.2.4
*
* D) When both local and remote tags match the endpoint should always
* enter the ESTABLISHED state, if it has not already done so.
*/
/* This case represents an initialization collision. */
static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_association *new_asoc)
{
struct sctp_ulpevent *ev = NULL, *ai_ev = NULL;
struct sctp_chunk *repl;
/* Clarification from Implementor's Guide:
* D) When both local and remote tags match the endpoint should
* enter the ESTABLISHED state, if it is in the COOKIE-ECHOED state.
* It should stop any cookie timer that may be running and send
* a COOKIE ACK.
*/
/* Don't accidentally move back into established state. */
if (asoc->state < SCTP_STATE_ESTABLISHED) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START,
SCTP_NULL());
/* RFC 2960 5.1 Normal Establishment of an Association
*
* D) IMPLEMENTATION NOTE: An implementation may choose
* to send the Communication Up notification to the
* SCTP user upon reception of a valid COOKIE
* ECHO chunk.
*/
ev = sctp_ulpevent_make_assoc_change(asoc, 0,
SCTP_COMM_UP, 0,
asoc->c.sinit_num_ostreams,
asoc->c.sinit_max_instreams,
NULL, GFP_ATOMIC);
if (!ev)
goto nomem;
/* Sockets API Draft Section 5.3.1.6
* When a peer sends an Adaptation Layer Indication parameter,
* SCTP delivers this notification to inform the application
* of the peer's requested adaptation layer.
*/
if (asoc->peer.adaptation_ind) {
ai_ev = sctp_ulpevent_make_adaptation_indication(asoc,
GFP_ATOMIC);
if (!ai_ev)
goto nomem;
}
}
repl = sctp_make_cookie_ack(new_asoc, chunk);
if (!repl)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
if (ev)
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ev));
if (ai_ev)
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ai_ev));
return SCTP_DISPOSITION_CONSUME;
nomem:
if (ai_ev)
sctp_ulpevent_free(ai_ev);
if (ev)
sctp_ulpevent_free(ev);
return SCTP_DISPOSITION_NOMEM;
}
/*
* Handle a duplicate COOKIE-ECHO. This usually means a cookie-carrying
* chunk was retransmitted and then delayed in the network.
*
* Section: 5.2.4 Handle a COOKIE ECHO when a TCB exists
*
* Verification Tag: None. Do cookie validation.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_disposition_t retval;
struct sctp_chunk *chunk = arg;
struct sctp_association *new_asoc;
int error = 0;
char action;
struct sctp_chunk *err_chk_p;
/* Make sure that the chunk has a valid length from the protocol
* perspective. In this case check to make sure we have at least
* enough for the chunk header. Cookie length verification is
* done later.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
/* "Decode" the chunk. We have no optional parameters so we
* are in good shape.
*/
chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data;
if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t)))
goto nomem;
/* In RFC 2960 5.2.4 3, if both Verification Tags in the State Cookie
* of a duplicate COOKIE ECHO match the Verification Tags of the
* current association, consider the State Cookie valid even if
* the lifespan is exceeded.
*/
new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error,
&err_chk_p);
/* FIXME:
* If the re-build failed, what is the proper error path
* from here?
*
* [We should abort the association. --piggy]
*/
if (!new_asoc) {
/* FIXME: Several errors are possible. A bad cookie should
* be silently discarded, but think about logging it too.
*/
switch (error) {
case -SCTP_IERROR_NOMEM:
goto nomem;
case -SCTP_IERROR_STALE_COOKIE:
sctp_send_stale_cookie_err(ep, asoc, chunk, commands,
err_chk_p);
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
case -SCTP_IERROR_BAD_SIG:
default:
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
}
/* Compare the tie_tag in cookie with the verification tag of
* current association.
*/
action = sctp_tietags_compare(new_asoc, asoc);
switch (action) {
case 'A': /* Association restart. */
retval = sctp_sf_do_dupcook_a(ep, asoc, chunk, commands,
new_asoc);
break;
case 'B': /* Collision case B. */
retval = sctp_sf_do_dupcook_b(ep, asoc, chunk, commands,
new_asoc);
break;
case 'C': /* Collision case C. */
retval = sctp_sf_do_dupcook_c(ep, asoc, chunk, commands,
new_asoc);
break;
case 'D': /* Collision case D. */
retval = sctp_sf_do_dupcook_d(ep, asoc, chunk, commands,
new_asoc);
break;
default: /* Discard packet for all others. */
retval = sctp_sf_pdiscard(ep, asoc, type, arg, commands);
break;
}
/* Delete the temporary new association. */
sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
/* Restore the association pointer to provide the SCTP command
* interpreter with a valid context in case it needs to manipulate
* the queues. */
sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC,
SCTP_ASOC((struct sctp_association *)asoc));
return retval;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
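/* Reading aid: a sketch of RFC 2960 Table 2, which sctp_tietags_compare()
* implements for the switch above (M = tag match, X = mismatch, 0 =
* tie-tag zero, A = any; reproduced from memory, so verify against the
* RFC before relying on it):
*
*	Local Tag  Peer's Tag  Local-Tie-Tag  Peer's-Tie-Tag  Action
*	    X          X             M              M         'A' (restart)
*	    M          X             A              A         'B' (collision)
*	    X          M             0              0         'C' (late cookie)
*	    M          M             A              A         'D' (dup cookie)
*/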
/*
* Process an ABORT. (SHUTDOWN-PENDING state)
*
* See sctp_sf_do_9_1_abort().
*/
sctp_disposition_t sctp_sf_shutdown_pending_abort(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
if (!sctp_vtag_verify_either(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* Make sure that the ABORT chunk has a valid length.
* Since this is an ABORT chunk, we have to discard it
* because of the following text:
* RFC 2960, Section 3.3.7
* If an endpoint receives an ABORT with a format error or for an
* association that doesn't exist, it MUST silently discard it.
* Because the length is "invalid", we can't really discard just
* this chunk, as we do not know its true length. So, to be safe,
* discard the whole packet.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* ADD-IP: Special case for ABORT chunks
* F4) One special consideration is that ABORT Chunks arriving
* destined to the IP address being deleted MUST be
* ignored (see Section 5.3.1 for further details).
*/
if (SCTP_ADDR_DEL ==
sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
return __sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands);
}
/*
* Process an ABORT. (SHUTDOWN-SENT state)
*
* See sctp_sf_do_9_1_abort().
*/
sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
if (!sctp_vtag_verify_either(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* Make sure that the ABORT chunk has a valid length.
* Since this is an ABORT chunk, we have to discard it
* because of the following text:
* RFC 2960, Section 3.3.7
* If an endpoint receives an ABORT with a format error or for an
* association that doesn't exist, it MUST silently discard it.
* Because the length is "invalid", we can't really discard just
* this chunk, as we do not know its true length. So, to be safe,
* discard the whole packet.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* ADD-IP: Special case for ABORT chunks
* F4) One special consideration is that ABORT Chunks arriving
* destined to the IP address being deleted MUST be
* ignored (see Section 5.3.1 for further details).
*/
if (SCTP_ADDR_DEL ==
sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
/* Stop the T2-shutdown timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
/* Stop the T5-shutdown guard timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
return __sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands);
}
/*
* Process an ABORT. (SHUTDOWN-ACK-SENT state)
*
* See sctp_sf_do_9_1_abort().
*/
sctp_disposition_t sctp_sf_shutdown_ack_sent_abort(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* We use the same T2 timer, so we can share the
* common function with the SHUTDOWN-SENT state.
*/
return sctp_sf_shutdown_sent_abort(ep, asoc, type, arg, commands);
}
/*
* Handle an Error received in COOKIE_ECHOED state.
*
* Only the Stale COOKIE Error type is handled here; all other errors
* are ignored.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_errhdr_t *err;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* Make sure that the ERROR chunk has a valid length.
* The parameter walking depends on this as well.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
/* Process the error here */
/* FUTURE FIXME: When PR-SCTP related and other optional
* parms are emitted, this will have to change to handle multiple
* errors.
*/
sctp_walk_errors(err, chunk->chunk_hdr) {
if (SCTP_ERROR_STALE_COOKIE == err->cause)
return sctp_sf_do_5_2_6_stale(ep, asoc, type,
arg, commands);
}
/* It is possible to have malformed error causes, and that
* will cause us to end the walk early. However, since
* we are discarding the packet, there should be no adverse
* effects.
*/
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
/*
* Handle a Stale COOKIE Error
*
* Section: 5.2.6 Handle Stale COOKIE Error
* If the association is in the COOKIE-ECHOED state, the endpoint may elect
* one of the following three alternatives.
* ...
* 3) Send a new INIT chunk to the endpoint, adding a Cookie
* Preservative parameter requesting an extension to the lifetime of
* the State Cookie. When calculating the time extension, an
* implementation SHOULD use the RTT information measured based on the
* previous COOKIE ECHO / ERROR exchange, and should add no more
* than 1 second beyond the measured RTT, due to long State Cookie
* lifetimes making the endpoint more subject to a replay attack.
*
* Verification Tag: Not explicit, but safe to ignore.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
time_t stale;
sctp_cookie_preserve_param_t bht;
sctp_errhdr_t *err;
struct sctp_chunk *reply;
struct sctp_bind_addr *bp;
int attempts = asoc->init_err_counter + 1;
if (attempts > asoc->max_init_attempts) {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_PERR(SCTP_ERROR_STALE_COOKIE));
return SCTP_DISPOSITION_DELETE_TCB;
}
err = (sctp_errhdr_t *)(chunk->skb->data);
/* When calculating the time extension, an implementation
* SHOULD use the RTT information measured based on the
* previous COOKIE ECHO / ERROR exchange, and should add no
* more than 1 second beyond the measured RTT, due to long
* State Cookie lifetimes making the endpoint more subject to
* a replay attack.
* Measure of Staleness's unit is usec. (1/1000000 sec)
* Suggested Cookie Life-span Increment's unit is msec.
* (1/1000 sec)
* In general, if you use the suggested cookie life, the value
* found in the field of measure of staleness should be doubled
* to give ample time to retransmit the new cookie and thus
* yield a higher probability of success on the reattempt.
*/
stale = ntohl(*(__be32 *)((u8 *)err + sizeof(sctp_errhdr_t)));
stale = (stale * 2) / 1000;
bht.param_hdr.type = SCTP_PARAM_COOKIE_PRESERVATIVE;
bht.param_hdr.length = htons(sizeof(bht));
bht.lifespan_increment = htonl(stale);
/* Build that new INIT chunk. */
bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
reply = sctp_make_init(asoc, bp, GFP_ATOMIC, sizeof(bht));
if (!reply)
goto nomem;
sctp_addto_chunk(reply, sizeof(bht), &bht);
/* Clear peer's init_tag cached in assoc as we are sending a new INIT */
sctp_add_cmd_sf(commands, SCTP_CMD_CLEAR_INIT_TAG, SCTP_NULL());
/* Stop pending T3-rtx and heartbeat timers */
sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
/* Delete non-primary peer ip addresses since we are transitioning
* back to the COOKIE-WAIT state
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DEL_NON_PRIMARY, SCTP_NULL());
/* If we've sent any data bundled with COOKIE-ECHO we will need to
* resend
*/
sctp_add_cmd_sf(commands, SCTP_CMD_T1_RETRAN,
SCTP_TRANSPORT(asoc->peer.primary_path));
/* Cast away the const modifier, as we want to just
* rerun it through as a side effect.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_INC, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_COOKIE_WAIT));
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
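/* Worked example for the staleness conversion above: if the peer reports
* a Measure of Staleness of 150000 usec (the cookie expired 150 ms ago),
* the suggested doubling gives
*
*	stale = (150000 * 2) / 1000 = 300
*
* so the Cookie Preservative asks for a 300 msec life-span increment.
*/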
/*
* Process an ABORT.
*
* Section: 9.1
* After checking the Verification Tag, the receiving endpoint shall
* remove the association from its record, and shall report the
* termination to its upper layer.
*
* Verification Tag: 8.5.1 Exceptions in Verification Tag Rules
* B) Rules for packet carrying ABORT:
*
* - The endpoint shall always fill in the Verification Tag field of the
* outbound packet with the destination endpoint's tag value if it
* is known.
*
* - If the ABORT is sent in response to an OOTB packet, the endpoint
* MUST follow the procedure described in Section 8.4.
*
* - The receiver MUST accept the packet if the Verification Tag
* matches either its own tag, OR the tag of its peer. Otherwise, the
* receiver MUST silently discard the packet and take no further
* action.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
if (!sctp_vtag_verify_either(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* Make sure that the ABORT chunk has a valid length.
* Since this is an ABORT chunk, we have to discard it
* because of the following text:
* RFC 2960, Section 3.3.7
* If an endpoint receives an ABORT with a format error or for an
* association that doesn't exist, it MUST silently discard it.
* Because the length is "invalid", we can't really discard just
* this chunk, as we do not know its true length. So, to be safe,
* discard the whole packet.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* ADD-IP: Special case for ABORT chunks
* F4) One special consideration is that ABORT Chunks arriving
* destined to the IP address being deleted MUST be
* ignored (see Section 5.3.1 for further details).
*/
if (SCTP_ADDR_DEL ==
sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
return __sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands);
}
static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
unsigned len;
__be16 error = SCTP_ERROR_NO_ERROR;
/* See if we have an error cause code in the chunk. */
len = ntohs(chunk->chunk_hdr->length);
if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) {
sctp_errhdr_t *err;
sctp_walk_errors(err, chunk->chunk_hdr);
if ((void *)err != (void *)chunk->chunk_end)
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
}
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
/* ASSOC_FAILED will DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(error));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
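/* Layout assumed by the cause extraction above for an ABORT that carries
* a single error cause (sketch):
*
*	chunk->chunk_hdr --> [ type=ABORT | flags | length ]
*	chunk->skb->data --> [ cause | length | info ... ]   (sctp_errhdr_t)
*
* so casting skb->data to sctp_errhdr_t yields the first cause code once
* the walk above has validated the cause lengths.
*/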
/*
* Process an ABORT. (COOKIE-WAIT state)
*
* See sctp_sf_do_9_1_abort() above.
*/
sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
unsigned len;
__be16 error = SCTP_ERROR_NO_ERROR;
if (!sctp_vtag_verify_either(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* Make sure that the ABORT chunk has a valid length.
* Since this is an ABORT chunk, we have to discard it
* because of the following text:
* RFC 2960, Section 3.3.7
* If an endpoint receives an ABORT with a format error or for an
* association that doesn't exist, it MUST silently discard it.
* Because the length is "invalid", we can't really discard just
* this chunk, as we do not know its true length. So, to be safe,
* discard the whole packet.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* See if we have an error cause code in the chunk. */
len = ntohs(chunk->chunk_hdr->length);
if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, asoc,
chunk->transport);
}
/*
* Process an incoming ICMP as an ABORT. (COOKIE-WAIT state)
*/
sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR,
ENOPROTOOPT, asoc,
(struct sctp_transport *)arg);
}
/*
* Process an ABORT. (COOKIE-ECHOED state)
*/
sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* There is a single T1 timer, so we can share the
* common function with the COOKIE-WAIT state.
*/
return sctp_sf_cookie_wait_abort(ep, asoc, type, arg, commands);
}
/*
* Stop T1 timer and abort association with "INIT failed".
*
* This is common code called by several sctp_sf_*_abort() functions above.
*/
static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
__be16 error, int sk_err,
const struct sctp_association *asoc,
struct sctp_transport *transport)
{
SCTP_DEBUG_PRINTK("ABORT received (INIT).\n");
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
/* CMD_INIT_FAILED will DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_PERR(error));
return SCTP_DISPOSITION_ABORT;
}
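/* Reading aid: the command sequence above, in order, (1) moves the
* association to CLOSED, (2) bumps the aborted-association counter,
* (3) stops the T1-INIT timer, (4) sets the user-visible socket error,
* and (5) reports INIT failure upward, which also deletes the TCB.
*/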
/*
* sctp_sf_do_9_2_shut
*
* Section: 9.2
* Upon the reception of the SHUTDOWN, the peer endpoint shall
* - enter the SHUTDOWN-RECEIVED state,
*
* - stop accepting new data from its SCTP user
*
* - verify, by checking the Cumulative TSN Ack field of the chunk,
* that all its outstanding DATA chunks have been received by the
* SHUTDOWN sender.
*
* Once an endpoint has reached the SHUTDOWN-RECEIVED state it MUST NOT
* send a SHUTDOWN in response to a ULP request, and should discard
* subsequent SHUTDOWN chunks.
*
* If there are still outstanding DATA chunks left, the SHUTDOWN
* receiver shall continue to follow normal data transmission
* procedures defined in Section 6 until all outstanding DATA chunks
* are acknowledged; however, the SHUTDOWN receiver MUST NOT accept
* new data from its SCTP user.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_shutdownhdr_t *sdh;
sctp_disposition_t disposition;
struct sctp_ulpevent *ev;
__u32 ctsn;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* Make sure that the SHUTDOWN chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk,
sizeof(struct sctp_shutdown_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
/* Convert the elaborate header. */
sdh = (sctp_shutdownhdr_t *)chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_shutdownhdr_t));
chunk->subh.shutdown_hdr = sdh;
ctsn = ntohl(sdh->cum_tsn_ack);
if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
SCTP_DEBUG_PRINTK("ctsn %x\n", ctsn);
SCTP_DEBUG_PRINTK("ctsn_ack_point %x\n", asoc->ctsn_ack_point);
return SCTP_DISPOSITION_DISCARD;
}
/* If the Cumulative TSN Ack is beyond the max TSN currently
* sent, terminate the association and respond to the
* sender with an ABORT.
*/
if (!TSN_lt(ctsn, asoc->next_tsn))
return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
/* API 5.3.1.5 SCTP_SHUTDOWN_EVENT
* When a peer sends a SHUTDOWN, SCTP delivers this notification to
* inform the application that it should cease sending data.
*/
ev = sctp_ulpevent_make_shutdown_event(asoc, 0, GFP_ATOMIC);
if (!ev) {
disposition = SCTP_DISPOSITION_NOMEM;
goto out;
}
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
/* Upon the reception of the SHUTDOWN, the peer endpoint shall
* - enter the SHUTDOWN-RECEIVED state,
* - stop accepting new data from its SCTP user
*
* [This is implicit in the new state.]
*/
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_SHUTDOWN_RECEIVED));
disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_shutdown_ack(ep, asoc, type,
arg, commands);
}
if (SCTP_DISPOSITION_NOMEM == disposition)
goto out;
/* - verify, by checking the Cumulative TSN Ack field of the
* chunk, that all its outstanding DATA chunks have been
* received by the SHUTDOWN sender.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN,
SCTP_BE32(chunk->subh.shutdown_hdr->cum_tsn_ack));
out:
return disposition;
}
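/* Reading aid: the two CTSN checks above bound the acceptable window,
*
*	ctsn_ack_point <= ctsn < next_tsn
*
* (in TSN serial arithmetic). Anything older is a stale or reordered
* SHUTDOWN and is discarded, while a CTSN at or beyond next_tsn acks
* data we never sent and is treated as a protocol violation.
*/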
/*
* sctp_sf_do_9_2_shut_ctsn
*
* Once an endpoint has reached the SHUTDOWN-RECEIVED state,
* it MUST NOT send a SHUTDOWN in response to a ULP request.
* The Cumulative TSN Ack of the received SHUTDOWN chunk
* MUST be processed.
*/
sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_shutdownhdr_t *sdh;
__u32 ctsn;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* Make sure that the SHUTDOWN chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk,
sizeof(struct sctp_shutdown_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
sdh = (sctp_shutdownhdr_t *)chunk->skb->data;
ctsn = ntohl(sdh->cum_tsn_ack);
if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
SCTP_DEBUG_PRINTK("ctsn %x\n", ctsn);
SCTP_DEBUG_PRINTK("ctsn_ack_point %x\n", asoc->ctsn_ack_point);
return SCTP_DISPOSITION_DISCARD;
}
/* If the Cumulative TSN Ack is beyond the max TSN currently
* sent, terminate the association and respond to the
* sender with an ABORT.
*/
if (!TSN_lt(ctsn, asoc->next_tsn))
return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
/* verify, by checking the Cumulative TSN Ack field of the
* chunk, that all its outstanding DATA chunks have been
* received by the SHUTDOWN sender.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN,
SCTP_BE32(sdh->cum_tsn_ack));
return SCTP_DISPOSITION_CONSUME;
}
/* RFC 2960 9.2
* If an endpoint is in SHUTDOWN-ACK-SENT state and receives an INIT chunk
* (e.g., if the SHUTDOWN COMPLETE was lost) with source and destination
* transport addresses (either in the IP addresses or in the INIT chunk)
* that belong to this association, it should discard the INIT chunk and
* retransmit the SHUTDOWN ACK chunk.
*/
sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = (struct sctp_chunk *) arg;
struct sctp_chunk *reply;
/* Make sure that the chunk has a valid length */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
/* Since we are not going to really process this INIT, there
* is no point in verifying chunk boundaries. Just generate
* the SHUTDOWN ACK.
*/
reply = sctp_make_shutdown_ack(asoc, chunk);
if (NULL == reply)
goto nomem;
/* Set the transport for the SHUTDOWN ACK chunk and the timeout for
* the T2-SHUTDOWN timer.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
/* and restart the T2-shutdown timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* sctp_sf_do_ecn_cwr
*
* Section: Appendix A: Explicit Congestion Notification
*
* CWR:
*
* RFC 2481 details a specific bit for a sender to send in the header of
* its next outbound TCP segment to indicate to its peer that it has
* reduced its congestion window. This is termed the CWR bit. For
* SCTP the same indication is made by including the CWR chunk.
* This chunk contains one data element, i.e. the TSN number that
* was sent in the ECNE chunk. This element represents the lowest
* TSN number in the datagram that was originally marked with the
* CE bit.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_cwrhdr_t *cwr;
struct sctp_chunk *chunk = arg;
u32 lowest_tsn;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
cwr = (sctp_cwrhdr_t *) chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_cwrhdr_t));
lowest_tsn = ntohl(cwr->lowest_tsn);
/* Does this CWR ack the last sent congestion notification? */
if (TSN_lte(asoc->last_ecne_tsn, lowest_tsn)) {
/* Stop sending ECNE. */
sctp_add_cmd_sf(commands,
SCTP_CMD_ECN_CWR,
SCTP_U32(lowest_tsn));
}
return SCTP_DISPOSITION_CONSUME;
}
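/* The TSN_lte() test above relies on serial-number arithmetic so the
* comparison survives 32-bit wraparound. A minimal sketch of the idea
* (the real macros live in the SCTP headers; this restates them from
* memory and is never compiled):
*/
#if 0
#define TSN_lt(a, b) (((s32)((a) - (b))) < 0)
/* e.g. TSN_lt(0xffffffff, 0x00000001) is true across the wrap. */
#endif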
/*
* sctp_sf_do_ecne
*
* Section: Appendix A: Explicit Congestion Notification
*
* ECN-Echo
*
* RFC 2481 details a specific bit for a receiver to send back in its
* TCP acknowledgements to notify the sender of the Congestion
* Experienced (CE) bit having arrived from the network. For SCTP this
* same indication is made by including the ECNE chunk. This chunk
* contains one data element, i.e. the lowest TSN associated with the IP
* datagram marked with the CE bit.....
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_ecne(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_ecnehdr_t *ecne;
struct sctp_chunk *chunk = arg;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
ecne = (sctp_ecnehdr_t *) chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_ecnehdr_t));
/* If this is a newer ECNE than the last CWR packet we sent out */
sctp_add_cmd_sf(commands, SCTP_CMD_ECN_ECNE,
SCTP_U32(ntohl(ecne->lowest_tsn)));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Section: 6.2 Acknowledgement on Reception of DATA Chunks
*
* The SCTP endpoint MUST always acknowledge the reception of each valid
* DATA chunk.
*
* The guidelines on delayed acknowledgement algorithm specified in
* Section 4.2 of [RFC2581] SHOULD be followed. Specifically, an
* acknowledgement SHOULD be generated for at least every second packet
* (not every second DATA chunk) received, and SHOULD be generated within
* 200 ms of the arrival of any unacknowledged DATA chunk. In some
* situations it may be beneficial for an SCTP transmitter to be more
* conservative than the algorithms detailed in this document allow.
* However, an SCTP transmitter MUST NOT be more aggressive than the
* following algorithms allow.
*
* An SCTP receiver MUST NOT generate more than one SACK for every
* incoming packet, other than to update the offered window as the
* receiving application consumes new data.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_arg_t force = SCTP_NOFORCE();
int error;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
error = sctp_eat_data(asoc, chunk, commands);
switch (error) {
case SCTP_IERROR_NO_ERROR:
break;
case SCTP_IERROR_HIGH_TSN:
case SCTP_IERROR_BAD_STREAM:
SCTP_INC_STATS(SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
goto discard_noforce;
case SCTP_IERROR_DUP_TSN:
case SCTP_IERROR_IGNORE_TSN:
SCTP_INC_STATS(SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
goto discard_force;
case SCTP_IERROR_NO_DATA:
goto consume;
case SCTP_IERROR_PROTO_VIOLATION:
return sctp_sf_abort_violation(ep, asoc, chunk, commands,
(u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
default:
BUG();
}
if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM)
force = SCTP_FORCE();
if (asoc->autoclose) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
}
/* If this is the last chunk in a packet, we need to count it
* toward sack generation. Note that we need to SACK every
* OTHER packet containing data chunks, EVEN IF WE DISCARD
* THEM. We elect to NOT generate SACKs if the chunk fails
* the verification tag test.
*
* RFC 2960 6.2 Acknowledgement on Reception of DATA Chunks
*
* The SCTP endpoint MUST always acknowledge the reception of
* each valid DATA chunk.
*
* The guidelines on delayed acknowledgement algorithm
* specified in Section 4.2 of [RFC2581] SHOULD be followed.
* Specifically, an acknowledgement SHOULD be generated for at
* least every second packet (not every second DATA chunk)
* received, and SHOULD be generated within 200 ms of the
* arrival of any unacknowledged DATA chunk. In some
* situations it may be beneficial for an SCTP transmitter to
* be more conservative than the algorithms detailed in this
* document allow. However, an SCTP transmitter MUST NOT be
* more aggressive than the following algorithms allow.
*/
if (chunk->end_of_packet)
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
return SCTP_DISPOSITION_CONSUME;
discard_force:
/* RFC 2960 6.2 Acknowledgement on Reception of DATA Chunks
*
* When a packet arrives with duplicate DATA chunk(s) and with
* no new DATA chunk(s), the endpoint MUST immediately send a
* SACK with no delay. If a packet arrives with duplicate
* DATA chunk(s) bundled with new DATA chunks, the endpoint
* MAY immediately send a SACK. Normally receipt of duplicate
* DATA chunks will occur when the original SACK chunk was lost
* and the peer's RTO has expired. The duplicate TSN number(s)
* SHOULD be reported in the SACK as duplicate.
*/
/* In our case, we split the MAY SACK advice up based on whether
* or not the last chunk is a duplicate.
*/
if (chunk->end_of_packet)
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
return SCTP_DISPOSITION_DISCARD;
discard_noforce:
if (chunk->end_of_packet)
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
return SCTP_DISPOSITION_DISCARD;
consume:
return SCTP_DISPOSITION_CONSUME;
}
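/* Summary sketch of the dispositions chosen above per sctp_eat_data()
* result (a reading aid, not normative):
*
*	NO_ERROR              consume; SACK at packet end (forced if I-bit set)
*	HIGH_TSN/BAD_STREAM   discard; SACK at packet end (not forced)
*	DUP_TSN/IGNORE_TSN    discard; SACK at packet end (forced)
*	NO_DATA               consume silently
*	PROTO_VIOLATION       abort the association
*/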
/*
* sctp_sf_eat_data_fast_4_4
*
* Section: 4 (4)
* (4) In SHUTDOWN-SENT state the endpoint MUST acknowledge any received
* DATA chunks without delay.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
int error;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
error = sctp_eat_data(asoc, chunk, commands);
switch (error) {
case SCTP_IERROR_NO_ERROR:
case SCTP_IERROR_HIGH_TSN:
case SCTP_IERROR_DUP_TSN:
case SCTP_IERROR_IGNORE_TSN:
case SCTP_IERROR_BAD_STREAM:
break;
case SCTP_IERROR_NO_DATA:
goto consume;
case SCTP_IERROR_PROTO_VIOLATION:
return sctp_sf_abort_violation(ep, asoc, chunk, commands,
(u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
default:
BUG();
}
/* Go ahead and force a SACK, since we are shutting down. */
/* Implementor's Guide.
*
* While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately
* respond to each received packet containing one or more DATA chunk(s)
* with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer
*/
if (chunk->end_of_packet) {
/* We must delay the chunk creation since the cumulative
* TSN has not been updated yet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
}
consume:
return SCTP_DISPOSITION_CONSUME;
}
/*
* Section: 6.2 Processing a Received SACK
* D) Any time a SACK arrives, the endpoint performs the following:
*
* i) If Cumulative TSN Ack is less than the Cumulative TSN Ack Point,
* then drop the SACK. Since Cumulative TSN Ack is monotonically
* increasing, a SACK whose Cumulative TSN Ack is less than the
* Cumulative TSN Ack Point indicates an out-of-order SACK.
*
* ii) Set rwnd equal to the newly received a_rwnd minus the number
* of bytes still outstanding after processing the Cumulative TSN Ack
* and the Gap Ack Blocks.
*
* iii) If the SACK is missing a TSN that was previously
* acknowledged via a Gap Ack Block (e.g., the data receiver
* reneged on the data), then mark the corresponding DATA chunk
* as available for retransmit: Mark it as missing for fast
* retransmit as described in Section 7.2.4 and if no retransmit
* timer is running for the destination address to which the DATA
* chunk was originally transmitted, then T3-rtx is started for
* that destination address.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_sackhdr_t *sackh;
__u32 ctsn;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* Make sure that the SACK chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_sack_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
/* Pull the SACK chunk from the data buffer */
sackh = sctp_sm_pull_sack(chunk);
/* Was this a bogus SACK? */
if (!sackh)
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
chunk->subh.sack_hdr = sackh;
ctsn = ntohl(sackh->cum_tsn_ack);
/* i) If Cumulative TSN Ack is less than the Cumulative TSN
* Ack Point, then drop the SACK. Since Cumulative TSN
* Ack is monotonically increasing, a SACK whose
* Cumulative TSN Ack is less than the Cumulative TSN Ack
* Point indicates an out-of-order SACK.
*/
if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
SCTP_DEBUG_PRINTK("ctsn %x\n", ctsn);
SCTP_DEBUG_PRINTK("ctsn_ack_point %x\n", asoc->ctsn_ack_point);
return SCTP_DISPOSITION_DISCARD;
}
/* If the Cumulative TSN Ack is beyond the max TSN currently
* sent, terminate the association and respond to the
* sender with an ABORT.
*/
if (!TSN_lt(ctsn, asoc->next_tsn))
return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
/* Return this SACK for further processing. */
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_SACKH(sackh));
/* Note: We do the rest of the work on the PROCESS_SACK
* side effect.
*/
return SCTP_DISPOSITION_CONSUME;
}
/*
* Generate an ABORT in response to a packet.
*
* Section: 8.4 Handle "Out of the blue" Packets, sctpimpguide 2.41
*
* 8) The receiver should respond to the sender of the OOTB packet with
* an ABORT. When sending the ABORT, the receiver of the OOTB packet
* MUST fill in the Verification Tag field of the outbound packet
* with the value found in the Verification Tag field of the OOTB
* packet and set the T-bit in the Chunk Flags to indicate that the
* Verification Tag is reflected. After sending this ABORT, the
* receiver of the OOTB packet shall discard the OOTB packet and take
* no further action.
*
* Verification Tag:
*
* The return value is the disposition of the chunk.
*/
static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_packet *packet = NULL;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *abort;
packet = sctp_ootb_pkt_new(asoc, chunk);
if (packet) {
/* Make an ABORT. The T bit will be set if the asoc
* is NULL.
*/
abort = sctp_make_abort(asoc, chunk, 0);
if (!abort) {
sctp_ootb_pkt_free(packet);
return SCTP_DISPOSITION_NOMEM;
}
/* Reflect vtag if T-Bit is set */
if (sctp_test_T_bit(abort))
packet->vtag = ntohl(chunk->sctp_hdr->vtag);
/* Associate the skb with the owning sock for accounting. */
abort->skb->sk = ep->base.sk;
sctp_packet_append_chunk(packet, abort);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
sctp_sf_pdiscard(ep, asoc, type, arg, commands);
return SCTP_DISPOSITION_CONSUME;
}
return SCTP_DISPOSITION_NOMEM;
}
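/* Reading aid for the vtag reflection above: sctp_make_abort() sets the
* T bit when asoc is NULL, and in that case the reply reuses the tag of
* the offending packet. Sketch of the resulting wire exchange:
*
*	OOTB packet in:  vtag = V, no matching association
*	ABORT out:       vtag = V, T bit set ("tag reflected")
*
* which lets the original sender match the ABORT to its own packet.
*/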
/*
* Received an ERROR chunk from peer. Generate SCTP_REMOTE_ERROR
* event as ULP notification for each cause included in the chunk.
*
* API 5.3.1.3 - SCTP_REMOTE_ERROR
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_errhdr_t *err;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* Make sure that the ERROR chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
sctp_walk_errors(err, chunk->chunk_hdr);
if ((void *)err != (void *)chunk->chunk_end)
return sctp_sf_violation_paramlen(ep, asoc, type, arg,
(void *)err, commands);
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
SCTP_CHUNK(chunk));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Process an inbound SHUTDOWN ACK.
*
* From Section 9.2:
* Upon the receipt of the SHUTDOWN ACK, the SHUTDOWN sender shall
* stop the T2-shutdown timer, send a SHUTDOWN COMPLETE chunk to its
* peer, and remove all record of the association.
*
* The return value is the disposition.
*/
sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_chunk *reply;
struct sctp_ulpevent *ev;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
/* 10.2 H) SHUTDOWN COMPLETE notification
*
* When SCTP completes the shutdown procedures (section 9.2) this
* notification is passed to the upper layer.
*/
ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP,
0, 0, 0, NULL, GFP_ATOMIC);
if (!ev)
goto nomem;
/* ...send a SHUTDOWN COMPLETE chunk to its peer, */
reply = sctp_make_shutdown_complete(asoc, chunk);
if (!reply)
goto nomem_chunk;
/* Do all the commands now (after allocation), so that we
* have consistent state if memory allocation fails.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
/* Upon the receipt of the SHUTDOWN ACK, the SHUTDOWN sender shall
* stop the T2-shutdown timer,
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
/* ...and remove all record of the association. */
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
nomem_chunk:
sctp_ulpevent_free(ev);
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* RFC 2960, 8.4 - Handle "Out of the blue" Packets, sctpimpguide 2.41.
*
* 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should
* respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE.
* When sending the SHUTDOWN COMPLETE, the receiver of the OOTB
* packet must fill in the Verification Tag field of the outbound
* packet with the Verification Tag received in the SHUTDOWN ACK and
* set the T-bit in the Chunk Flags to indicate that the Verification
* Tag is reflected.
*
* 8) The receiver should respond to the sender of the OOTB packet with
* an ABORT. When sending the ABORT, the receiver of the OOTB packet
* MUST fill in the Verification Tag field of the outbound packet
* with the value found in the Verification Tag field of the OOTB
* packet and set the T-bit in the Chunk Flags to indicate that the
* Verification Tag is reflected. After sending this ABORT, the
* receiver of the OOTB packet shall discard the OOTB packet and take
* no further action.
*/
sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sk_buff *skb = chunk->skb;
sctp_chunkhdr_t *ch;
sctp_errhdr_t *err;
__u8 *ch_end;
int ootb_shut_ack = 0;
int ootb_cookie_ack = 0;
SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
do {
/* Report a violation if the chunk is less than the minimal length */
if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
/* Now that we know we at least have a chunk header,
* do things that are type appropriate.
*/
if (SCTP_CID_SHUTDOWN_ACK == ch->type)
ootb_shut_ack = 1;
/* RFC 2960, Section 3.3.7
* Moreover, under any circumstances, an endpoint that
* receives an ABORT MUST NOT respond to that ABORT by
* sending an ABORT of its own.
*/
if (SCTP_CID_ABORT == ch->type)
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* RFC 2960 8.4, 7) If the packet contains a "Stale cookie" ERROR
* or a COOKIE ACK the SCTP Packet should be silently
* discarded.
*/
if (SCTP_CID_COOKIE_ACK == ch->type)
ootb_cookie_ack = 1;
if (SCTP_CID_ERROR == ch->type) {
sctp_walk_errors(err, ch) {
if (SCTP_ERROR_STALE_COOKIE == err->cause) {
ootb_cookie_ack = 1;
break;
}
}
}
/* Report a violation if the chunk length overflows the packet */
ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
if (ch_end > skb_tail_pointer(skb))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
ch = (sctp_chunkhdr_t *) ch_end;
} while (ch_end < skb_tail_pointer(skb));
if (ootb_shut_ack)
return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
else if (ootb_cookie_ack)
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
else
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
}
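/* The chunk walk above advances by WORD_ROUND(length) because a chunk's
* length field counts real bytes while chunks sit on 4-byte boundaries
* on the wire. Sketch (macro body quoted from memory, never compiled):
*/
#if 0
#define WORD_ROUND(s) (((s) + 3) & ~3)
/* e.g. a 5-byte chunk occupies WORD_ROUND(5) == 8 bytes on the wire. */
#endif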
/*
* Handle an "Out of the blue" SHUTDOWN ACK.
*
* Section: 8.4 5, sctpimpguide 2.41.
*
* 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should
* respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE.
* When sending the SHUTDOWN COMPLETE, the receiver of the OOTB
* packet must fill in the Verification Tag field of the outbound
* packet with the Verification Tag received in the SHUTDOWN ACK and
* set the T-bit in the Chunk Flags to indicate that the Verification
* Tag is reflected.
*
* Inputs
* (endpoint, asoc, type, arg, commands)
*
* Outputs
* (sctp_disposition_t)
*
* The return value is the disposition of the chunk.
*/
static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_packet *packet = NULL;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *shut;
packet = sctp_ootb_pkt_new(asoc, chunk);
if (packet) {
/* Make a SHUTDOWN_COMPLETE.
* The T bit will be set if the asoc is NULL.
*/
shut = sctp_make_shutdown_complete(asoc, chunk);
if (!shut) {
sctp_ootb_pkt_free(packet);
return SCTP_DISPOSITION_NOMEM;
}
/* Reflect vtag if T-Bit is set */
if (sctp_test_T_bit(shut))
packet->vtag = ntohl(chunk->sctp_hdr->vtag);
/* Set the skb to the belonging sock for accounting. */
shut->skb->sk = ep->base.sk;
sctp_packet_append_chunk(packet, shut);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
/* If the chunk length is invalid, we don't want to process
* the rest of the packet.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* We need to discard the rest of the packet to prevent
* potential bombing attacks from additional bundled chunks.
* This is documented in SCTP Threats ID.
*/
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
return SCTP_DISPOSITION_NOMEM;
}
/*
* Handle SHUTDOWN ACK in COOKIE_ECHOED or COOKIE_WAIT state.
*
* Verification Tag: 8.5.1 E) Rules for packet carrying a SHUTDOWN ACK
* If the receiver is in COOKIE-ECHOED or COOKIE-WAIT state the
* procedures in section 8.4 SHOULD be followed, in other words it
* should be treated as an Out Of The Blue packet.
* [This means that we do NOT check the Verification Tag on these
* chunks. --piggy ]
*
*/
sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
/* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
/* Although we do have an association in this case, it corresponds
* to a restarted association. So the packet is treated as an OOTB
* packet and the state function that handles OOTB SHUTDOWN_ACK is
* called with a NULL association.
*/
SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
return sctp_sf_shut_8_4_5(ep, NULL, type, arg, commands);
}
/* ADDIP Section 4.2 Upon reception of an ASCONF Chunk. */
sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_chunk *asconf_ack = NULL;
struct sctp_paramhdr *err_param = NULL;
sctp_addiphdr_t *hdr;
union sctp_addr_param *addr_param;
__u32 serial;
int length;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
/* ADD-IP: Section 4.1.1
* This chunk MUST be sent in an authenticated way by using
* the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
* is received unauthenticated it MUST be silently discarded as
* described in [I-D.ietf-tsvwg-sctp-auth].
*/
if (!sctp_addip_noauth && !chunk->auth)
return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
/* Make sure that the ASCONF ADDIP chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_addip_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
hdr = (sctp_addiphdr_t *)chunk->skb->data;
serial = ntohl(hdr->serial);
addr_param = (union sctp_addr_param *)hdr->params;
length = ntohs(addr_param->p.length);
if (length < sizeof(sctp_paramhdr_t))
return sctp_sf_violation_paramlen(ep, asoc, type, arg,
(void *)addr_param, commands);
/* Verify the ASCONF chunk before processing it. */
if (!sctp_verify_asconf(asoc,
(sctp_paramhdr_t *)((void *)addr_param + length),
(void *)chunk->chunk_end,
&err_param))
return sctp_sf_violation_paramlen(ep, asoc, type, arg,
(void *)err_param, commands);
/* ADDIP 5.2 E1) Compare the value of the serial number to the value
* the endpoint stored in a new association variable
* 'Peer-Serial-Number'.
*/
if (serial == asoc->peer.addip_serial + 1) {
/* If this is the first instance of ASCONF in the packet,
* we can clean our old ASCONF-ACKs.
*/
if (!chunk->has_asconf)
sctp_assoc_clean_asconf_ack_cache(asoc);
/* ADDIP 5.2 E4) When the Sequence Number matches the next one
* expected, process the ASCONF as described below and after
* processing the ASCONF Chunk, append an ASCONF-ACK Chunk to
* the response packet and cache a copy of it (in the event it
* later needs to be retransmitted).
*
* Essentially, do V1-V5.
*/
asconf_ack = sctp_process_asconf((struct sctp_association *)
asoc, chunk);
if (!asconf_ack)
return SCTP_DISPOSITION_NOMEM;
} else if (serial < asoc->peer.addip_serial + 1) {
/* ADDIP 5.2 E2)
* If the value found in the Sequence Number is less than the
* ('Peer-Sequence-Number' + 1), simply skip to the next
* ASCONF, and include in the outbound response packet
* any previously cached ASCONF-ACK response that was
* sent and saved that matches the Sequence Number of the
* ASCONF. Note: It is possible that no cached ASCONF-ACK
* Chunk exists. This will occur when an older ASCONF
* arrives out of order. In such a case, the receiver
* should skip the ASCONF Chunk and not include ASCONF-ACK
* Chunk for that chunk.
*/
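/* Note: sctp_assoc_lookup_asconf_ack() is assumed to take the serial
 * in network byte order, which is why the raw hdr->serial is passed
 * here rather than the host-order 'serial' computed above.
 */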
asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial);
if (!asconf_ack)
return SCTP_DISPOSITION_DISCARD;
/* Reset the transport so that we select the correct one
* this time around. This is to make sure that we don't
* accidentally use a stale transport that's been removed.
*/
asconf_ack->transport = NULL;
} else {
/* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since
* it must be either a stale packet or from an attacker.
*/
return SCTP_DISPOSITION_DISCARD;
}
/* ADDIP 5.2 E6) The destination address of the SCTP packet
* containing the ASCONF-ACK Chunks MUST be the source address of
* the SCTP packet that held the ASCONF Chunks.
*
* To do this properly, we'll set the destination address of the chunk
* and at transmit time will try to look up the transport to use.
* Since ASCONFs may be bundled, the correct transport may not be
* created until we process the entire packet, thus this workaround.
*/
asconf_ack->dest = chunk->source;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
if (asoc->new_transport) {
sctp_sf_heartbeat(ep, asoc, type, asoc->new_transport,
commands);
((struct sctp_association *)asoc)->new_transport = NULL;
}
return SCTP_DISPOSITION_CONSUME;
}
/*
* ADDIP Section 4.3 General rules for address manipulation
* When building TLV parameters for the ASCONF Chunk that will add or
* delete IP addresses the D0 to D13 rules should be applied:
*/
sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *asconf_ack = arg;
struct sctp_chunk *last_asconf = asoc->addip_last_asconf;
struct sctp_chunk *abort;
struct sctp_paramhdr *err_param = NULL;
sctp_addiphdr_t *addip_hdr;
__u32 sent_serial, rcvd_serial;
if (!sctp_vtag_verify(asconf_ack, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
/* ADD-IP, Section 4.1.2:
* This chunk MUST be sent in an authenticated way by using
* the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
* is received unauthenticated it MUST be silently discarded as
* described in [I-D.ietf-tsvwg-sctp-auth].
*/
if (!sctp_addip_noauth && !asconf_ack->auth)
return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
/* Make sure that the ADDIP chunk has a valid length. */
if (!sctp_chunk_length_valid(asconf_ack, sizeof(sctp_addip_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data;
rcvd_serial = ntohl(addip_hdr->serial);
/* Verify the ASCONF-ACK chunk before processing it. */
if (!sctp_verify_asconf(asoc,
(sctp_paramhdr_t *)addip_hdr->params,
(void *)asconf_ack->chunk_end,
&err_param))
return sctp_sf_violation_paramlen(ep, asoc, type, arg,
(void *)err_param, commands);
if (last_asconf) {
addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr;
sent_serial = ntohl(addip_hdr->serial);
} else {
sent_serial = asoc->addip_serial - 1;
}
/* D0) If an endpoint receives an ASCONF-ACK that is greater than or
* equal to the next serial number to be used but no ASCONF chunk is
* outstanding, the endpoint MUST ABORT the association. Note that a
* sequence number is greater than another if it is no more than
* 2^31 - 1 larger than the current sequence number (using serial
* arithmetic).
*/
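/* ADDIP_SERIAL_gte() is assumed to implement wrap-safe serial
 * arithmetic, roughly:
 *
 *	gte(a, b) := ((__s32)((a) - (b)) >= 0)
 *
 * so with sent_serial == 0xffffffff, a rcvd_serial of 0 (that is,
 * sent_serial + 1 after wrap-around) still compares as greater or
 * equal, which the D0 check below relies on.
 */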
if (ADDIP_SERIAL_gte(rcvd_serial, sent_serial + 1) &&
!(asoc->addip_last_asconf)) {
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
if (abort) {
sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(abort));
}
/* We are going to ABORT, so we might as well stop
* processing the rest of the chunks in the packet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
if ((rcvd_serial == sent_serial) && asoc->addip_last_asconf) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
asconf_ack)) {
/* Successfully processed ASCONF_ACK. We can
* release the next asconf if we have one.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
SCTP_NULL());
return SCTP_DISPOSITION_CONSUME;
}
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
if (abort) {
sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(abort));
}
/* We are going to ABORT, so we might as well stop
* processing the rest of the chunks in the packet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
return SCTP_DISPOSITION_DISCARD;
}
/*
* PR-SCTP Section 3.6 Receiver Side Implementation of PR-SCTP
*
* When a FORWARD TSN chunk arrives, the data receiver MUST first update
* its cumulative TSN point to the value carried in the FORWARD TSN
* chunk, and then MUST further advance its cumulative TSN point locally
* if possible.
* After the above processing, the data receiver MUST stop reporting any
* missing TSNs earlier than or equal to the new cumulative TSN point.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
*
* The return value is the disposition of the chunk.
*/
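/* As a concrete example: if the receiver's cumulative TSN point is 10
 * and a FORWARD TSN chunk carries a New Cumulative TSN of 13, TSNs
 * 11-13 are treated as received and are no longer reported as missing,
 * even though the corresponding DATA chunks never arrived.
 */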
sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
struct sctp_fwdtsn_skip *skip;
__u16 len;
__u32 tsn;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
/* Make sure that the FORWARD_TSN chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
len = ntohs(chunk->chunk_hdr->length);
len -= sizeof(struct sctp_chunkhdr);
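/* len now covers the FORWARD TSN header plus any skip entries; pull
 * the whole chunk body so the skb is positioned past this chunk.
 */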
skb_pull(chunk->skb, len);
tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __func__, tsn);
/* The TSN is too high--silently discard the chunk and count on it
* getting retransmitted later.
*/
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto discard_noforce;
/* Silently discard the chunk if stream-id is not valid */
sctp_walk_fwdtsn(skip, chunk) {
if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
goto discard_noforce;
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
/* Count this as receiving DATA. */
if (asoc->autoclose) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
}
/* FIXME: For now send a SACK, but DATA processing may
* send another.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
return SCTP_DISPOSITION_CONSUME;
discard_noforce:
return SCTP_DISPOSITION_DISCARD;
}
sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
struct sctp_fwdtsn_skip *skip;
__u16 len;
__u32 tsn;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
/* Make sure that the FORWARD_TSN chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
len = ntohs(chunk->chunk_hdr->length);
len -= sizeof(struct sctp_chunkhdr);
skb_pull(chunk->skb, len);
tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __func__, tsn);
/* The TSN is too high--silently discard the chunk and count on it
* getting retransmitted later.
*/
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto gen_shutdown;
/* Silently discard the chunk if stream-id is not valid */
sctp_walk_fwdtsn(skip, chunk) {
if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
goto gen_shutdown;
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
/* Go ahead and force a SACK, since we are shutting down. */
gen_shutdown:
/* Implementor's Guide.
*
* While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately
* respond to each received packet containing one or more DATA chunk(s)
* with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
return SCTP_DISPOSITION_CONSUME;
}
/*
* SCTP-AUTH Section 6.3 Receiving authenticated chunks
*
* The receiver MUST use the HMAC algorithm indicated in the HMAC
* Identifier field. If this algorithm was not specified by the
* receiver in the HMAC-ALGO parameter in the INIT or INIT-ACK chunk
* during association setup, the AUTH chunk and all chunks after it MUST
* be discarded and an ERROR chunk SHOULD be sent with the error cause
* defined in Section 4.1.
*
* If an endpoint with no shared key receives a Shared Key Identifier
* other than 0, it MUST silently discard all authenticated chunks. If
* the endpoint has at least one endpoint pair shared key for the peer,
* it MUST use the key specified by the Shared Key Identifier if a
* key has been configured for that Shared Key Identifier. If no
* endpoint pair shared key has been configured for that Shared Key
* Identifier, all authenticated chunks MUST be silently discarded.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
*
* The return value is the disposition of the chunk.
*/
static sctp_ierror_t sctp_sf_authenticate(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
struct sctp_chunk *chunk)
{
struct sctp_authhdr *auth_hdr;
struct sctp_hmac *hmac;
unsigned int sig_len;
__u16 key_id;
__u8 *save_digest;
__u8 *digest;
/* Pull in the auth header, so we can do some more verification */
auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
chunk->subh.auth_hdr = auth_hdr;
skb_pull(chunk->skb, sizeof(struct sctp_authhdr));
/* Make sure that we support the HMAC algorithm from the auth
* chunk.
*/
if (!sctp_auth_asoc_verify_hmac_id(asoc, auth_hdr->hmac_id))
return SCTP_IERROR_AUTH_BAD_HMAC;
/* Make sure that the provided shared key identifier has been
* configured
*/
key_id = ntohs(auth_hdr->shkey_id);
if (key_id != asoc->active_key_id && !sctp_auth_get_shkey(asoc, key_id))
return SCTP_IERROR_AUTH_BAD_KEYID;
/* Make sure that the length of the signature matches what
* we expect.
*/
sig_len = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_auth_chunk_t);
hmac = sctp_auth_get_hmac(ntohs(auth_hdr->hmac_id));
if (sig_len != hmac->hmac_len)
return SCTP_IERROR_PROTO_VIOLATION;
/* Now that we've done validation checks, we can compute and
* verify the hmac. The steps involved are:
* 1. Save the digest from the chunk.
* 2. Zero out the digest in the chunk.
* 3. Compute the new digest
* 4. Compare saved and new digests.
*/
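/* The zeroing step matters because the sender computed its HMAC over
 * the packet with the signature field set to all zeroes (SCTP-AUTH,
 * Section 6.2), so we must recompute over identical bytes.
 */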
digest = auth_hdr->hmac;
skb_pull(chunk->skb, sig_len);
save_digest = kmemdup(digest, sig_len, GFP_ATOMIC);
if (!save_digest)
goto nomem;
memset(digest, 0, sig_len);
sctp_auth_calculate_hmac(asoc, chunk->skb,
(struct sctp_auth_chunk *)chunk->chunk_hdr,
GFP_ATOMIC);
/* Discard the packet if the digests do not match */
if (memcmp(save_digest, digest, sig_len)) {
kfree(save_digest);
return SCTP_IERROR_BAD_SIG;
}
kfree(save_digest);
chunk->auth = 1;
return SCTP_IERROR_NO_ERROR;
nomem:
return SCTP_IERROR_NOMEM;
}
sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_authhdr *auth_hdr;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *err_chunk;
sctp_ierror_t error;
/* Make sure that the peer is AUTH capable */
if (!asoc->peer.auth_capable)
return sctp_sf_unk_chunk(ep, asoc, type, arg, commands);
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
/* Make sure that the AUTH chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_auth_chunk)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
error = sctp_sf_authenticate(ep, asoc, type, chunk);
switch (error) {
case SCTP_IERROR_AUTH_BAD_HMAC:
/* Generate the ERROR chunk and discard the rest
* of the packet
*/
err_chunk = sctp_make_op_error(asoc, chunk,
SCTP_ERROR_UNSUP_HMAC,
&auth_hdr->hmac_id,
sizeof(__u16), 0);
if (err_chunk) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err_chunk));
}
/* Fall Through */
case SCTP_IERROR_AUTH_BAD_KEYID:
case SCTP_IERROR_BAD_SIG:
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
case SCTP_IERROR_PROTO_VIOLATION:
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
case SCTP_IERROR_NOMEM:
return SCTP_DISPOSITION_NOMEM;
default: /* Prevent gcc warnings */
break;
}
if (asoc->active_key_id != ntohs(auth_hdr->shkey_id)) {
struct sctp_ulpevent *ev;
ev = sctp_ulpevent_make_authkey(asoc, ntohs(auth_hdr->shkey_id),
SCTP_AUTH_NEWKEY, GFP_ATOMIC);
if (!ev)
return SCTP_DISPOSITION_NOMEM;
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ev));
}
return SCTP_DISPOSITION_CONSUME;
}
/*
* Process an unknown chunk.
*
* Section: 3.2. Also, 2.1 in the implementor's guide.
*
* Chunk Types are encoded such that the highest-order two bits specify
* the action that must be taken if the processing endpoint does not
* recognize the Chunk Type.
*
* 00 - Stop processing this SCTP packet and discard it, do not process
* any further chunks within it.
*
* 01 - Stop processing this SCTP packet and discard it, do not process
* any further chunks within it, and report the unrecognized
* chunk in an 'Unrecognized Chunk Type'.
*
* 10 - Skip this chunk and continue processing.
*
* 11 - Skip this chunk and continue processing, but report in an ERROR
* Chunk using the 'Unrecognized Chunk Type' cause of error.
*
* The return value is the disposition of the chunk.
*/
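/* For example, chunk type 0xC1 (ASCONF) has its upper two bits set to
 * 11, so an endpoint without ADDIP support skips it but reports it in
 * an ERROR chunk, while a type whose upper bits are 00 causes the rest
 * of the packet to be discarded silently. SCTP_CID_ACTION_MASK below
 * selects exactly these two bits.
 */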
sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *unk_chunk = arg;
struct sctp_chunk *err_chunk;
sctp_chunkhdr_t *hdr;
SCTP_DEBUG_PRINTK("Processing the unknown chunk id %d.\n", type.chunk);
if (!sctp_vtag_verify(unk_chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* Make sure that the chunk has a valid length.
* Since we don't know the chunk type, we use a general
* chunkhdr structure to make a comparison.
*/
if (!sctp_chunk_length_valid(unk_chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
switch (type.chunk & SCTP_CID_ACTION_MASK) {
case SCTP_CID_ACTION_DISCARD:
/* Discard the packet. */
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
case SCTP_CID_ACTION_DISCARD_ERR:
/* Generate an ERROR chunk as response. */
hdr = unk_chunk->chunk_hdr;
err_chunk = sctp_make_op_error(asoc, unk_chunk,
SCTP_ERROR_UNKNOWN_CHUNK, hdr,
WORD_ROUND(ntohs(hdr->length)),
0);
if (err_chunk) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err_chunk));
}
/* Discard the packet. */
sctp_sf_pdiscard(ep, asoc, type, arg, commands);
return SCTP_DISPOSITION_CONSUME;
case SCTP_CID_ACTION_SKIP:
/* Skip the chunk. */
return SCTP_DISPOSITION_DISCARD;
case SCTP_CID_ACTION_SKIP_ERR:
/* Generate an ERROR chunk as response. */
hdr = unk_chunk->chunk_hdr;
err_chunk = sctp_make_op_error(asoc, unk_chunk,
SCTP_ERROR_UNKNOWN_CHUNK, hdr,
WORD_ROUND(ntohs(hdr->length)),
0);
if (err_chunk) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err_chunk));
}
/* Skip the chunk. */
return SCTP_DISPOSITION_CONSUME;
default:
break;
}
return SCTP_DISPOSITION_DISCARD;
}
/*
* Discard the chunk.
*
* Section: 0.2, 5.2.3, 5.2.5, 5.2.6, 6.0, 8.4.6, 8.5.1c, 9.2
* [Too numerous to mention...]
* Verification Tag: No verification needed.
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
/* Make sure that the chunk has a valid length.
* Since we don't know the chunk type, we use a general
* chunkhdr structure to make a comparison.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk);
return SCTP_DISPOSITION_DISCARD;
}
/*
* Discard the whole packet.
*
* Section: 8.4 2)
*
* 2) If the OOTB packet contains an ABORT chunk, the receiver MUST
* silently discard the OOTB packet and take no further action.
*
* Verification Tag: No verification necessary
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_pdiscard(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
SCTP_INC_STATS(SCTP_MIB_IN_PKT_DISCARDS);
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
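/* SCTP_CMD_DISCARD_PACKET tells the side-effect interpreter to drop
 * the whole inbound packet, i.e. any chunks bundled after this one.
 */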
return SCTP_DISPOSITION_CONSUME;
}
/*
* The other end is violating protocol.
*
* Section: Not specified
* Verification Tag: Not specified
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* We simply tag the chunk as a violation. The state machine will log
* the violation and continue.
*/
sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
/* Make sure that the chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
return SCTP_DISPOSITION_VIOLATION;
}
/*
* Common function to handle a protocol violation.
*/
static sctp_disposition_t sctp_sf_abort_violation(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
void *arg,
sctp_cmd_seq_t *commands,
const __u8 *payload,
const size_t paylen)
{
struct sctp_packet *packet = NULL;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *abort = NULL;
/* SCTP-AUTH, Section 6.3:
* It should be noted that if the receiver wants to tear
* down an association in an authenticated way only, the
* handling of malformed packets should not result in
* tearing down the association.
*
* This means that if we only want to abort associations
* in an authenticated way (i.e AUTH+ABORT), then we
* can't destroy this association just because the packet
* was malformed.
*/
if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
goto discard;
/* Make the abort chunk. */
abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
if (!abort)
goto nomem;
if (asoc) {
/* Treat INIT-ACK as a special case during COOKIE-WAIT. */
if (chunk->chunk_hdr->type == SCTP_CID_INIT_ACK &&
!asoc->peer.i.init_tag) {
sctp_initack_chunk_t *initack;
initack = (sctp_initack_chunk_t *)chunk->chunk_hdr;
if (!sctp_chunk_length_valid(chunk,
sizeof(sctp_initack_chunk_t)))
abort->chunk_hdr->flags |= SCTP_CHUNK_FLAG_T;
else {
unsigned int inittag;
inittag = ntohl(initack->init_hdr.init_tag);
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_INITTAG,
SCTP_U32(inittag));
}
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNREFUSED));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
} else {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
}
} else {
packet = sctp_ootb_pkt_new(asoc, chunk);
if (!packet)
goto nomem_pkt;
if (sctp_test_T_bit(abort))
packet->vtag = ntohl(chunk->sctp_hdr->vtag);
abort->skb->sk = ep->base.sk;
sctp_packet_append_chunk(packet, abort);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
}
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
discard:
sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
return SCTP_DISPOSITION_ABORT;
nomem_pkt:
sctp_chunk_free(abort);
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Handle a protocol violation when the chunk length is invalid.
* "Invalid" length is identified as smaller than the minimal length a
* given chunk can be. For example, a SACK chunk has invalid length
* if its length is set to be smaller than the size of sctp_sack_chunk_t.
*
* We inform the other end by sending an ABORT with a Protocol Violation
* error code.
*
* Section: Not specified
* Verification Tag: Nothing to do
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (reply_msg, msg_up, counters)
*
* Generate an ABORT chunk and terminate the association.
*/
static sctp_disposition_t sctp_sf_violation_chunklen(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
static const char err_str[] = "The following chunk had invalid length:";
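/* Note that sizeof(err_str) includes the terminating NUL, so the NUL
 * byte is carried along in the Protocol Violation cause payload.
 */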
return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
/*
* Handle a protocol violation when the parameter length is invalid.
* If the length is smaller than the minimum length of a given parameter,
* or the accumulated length of multiple parameters exceeds the end of
* the chunk, the length is considered invalid.
*/
static sctp_disposition_t sctp_sf_violation_paramlen(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg, void *ext,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_paramhdr *param = ext;
struct sctp_chunk *abort = NULL;
if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
goto discard;
/* Make the abort chunk. */
abort = sctp_make_violation_paramlen(asoc, chunk, param);
if (!abort)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
discard:
sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
return SCTP_DISPOSITION_ABORT;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/* Handle a protocol violation when the peer is trying to advance the
* Cumulative TSN Ack to a point beyond the highest TSN currently sent.
*
* We inform the other end by sending an ABORT with a Protocol Violation
* error code.
*/
static sctp_disposition_t sctp_sf_violation_ctsn(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
static const char err_str[] = "The cumulative tsn ack beyond the max tsn currently sent:";
return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
/* Handle protocol violation of an invalid chunk bundling. For example,
* when we have an association and we receive bundled INIT-ACK, or
* SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle"
* statement from the specs. Additionally, there might be an attacker
* on the path and we may not want to continue this communication.
*/
static sctp_disposition_t sctp_sf_violation_chunk(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
static const char err_str[] = "The following chunk violates protocol:";
if (!asoc)
return sctp_sf_violation(ep, asoc, type, arg, commands);
return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
/***************************************************************************
* These are the state functions for handling primitive (Section 10) events.
***************************************************************************/
/*
* sctp_sf_do_prm_asoc
*
* Section: 10.1 ULP-to-SCTP
* B) Associate
*
* Format: ASSOCIATE(local SCTP instance name, destination transport addr,
* outbound stream count)
* -> association id [,destination transport addr list] [,outbound stream
* count]
*
* This primitive allows the upper layer to initiate an association to a
* specific peer endpoint.
*
* The peer endpoint shall be specified by one of the transport addresses
* which defines the endpoint (see Section 1.4). If the local SCTP
* instance has not been initialized, the ASSOCIATE is considered an
* error.
* [This is not relevant for the kernel implementation since we do all
* initialization at boot time. If we hadn't initialized, we wouldn't
* get anywhere near this code.]
*
* An association id, which is a local handle to the SCTP association,
* will be returned on successful establishment of the association. If
* SCTP is not able to open an SCTP association with the peer endpoint,
* an error is returned.
* [In the kernel implementation, the struct sctp_association needs to
* be created BEFORE causing this primitive to run.]
*
* Other association parameters may be returned, including the
* complete destination transport addresses of the peer as well as the
* outbound stream count of the local endpoint. One of the transport
* addresses from the returned destination addresses will be selected by
* the local endpoint as default primary path for sending SCTP packets
* to this peer. The returned "destination transport addr list" can
* be used by the ULP to change the default primary path or to force
* sending a packet to a specific transport address. [All of this
* stuff happens when the INIT ACK arrives. This is a NON-BLOCKING
* function.]
*
* Mandatory attributes:
*
* o local SCTP instance name - obtained from the INITIALIZE operation.
* [This is the argument asoc.]
* o destination transport addr - specified as one of the transport
* addresses of the peer endpoint with which the association is to be
* established.
* [This is asoc->peer.active_path.]
* o outbound stream count - the number of outbound streams the ULP
* would like to open towards this peer endpoint.
* [BUG: This is not currently implemented.]
* Optional attributes:
*
* None.
*
* The return value is a disposition.
*/
sctp_disposition_t sctp_sf_do_prm_asoc(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *repl;
struct sctp_association *my_asoc;
/* The comment below says that we enter COOKIE-WAIT AFTER
* sending the INIT, but that doesn't actually work in our
* implementation...
*/
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_COOKIE_WAIT));
/* RFC 2960 5.1 Normal Establishment of an Association
*
* A) "A" first sends an INIT chunk to "Z". In the INIT, "A"
* must provide its Verification Tag (Tag_A) in the Initiate
* Tag field. Tag_A SHOULD be a random number in the range of
* 1 to 4294967295 (see 5.3.1 for Tag value selection). ...
*/
repl = sctp_make_init(asoc, &asoc->base.bind_addr, GFP_ATOMIC, 0);
if (!repl)
goto nomem;
/* Cast away the const modifier, as we want to just
* re-run it through as a side effect.
*/
my_asoc = (struct sctp_association *)asoc;
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(my_asoc));
/* Choose transport for INIT. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
SCTP_CHUNK(repl));
/* After sending the INIT, "A" starts the T1-init timer and
* enters the COOKIE-WAIT state.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Process the SEND primitive.
*
* Section: 10.1 ULP-to-SCTP
* E) Send
*
* Format: SEND(association id, buffer address, byte count [,context]
* [,stream id] [,life time] [,destination transport address]
* [,unorder flag] [,no-bundle flag] [,payload protocol-id] )
* -> result
*
* This is the main method to send user data via SCTP.
*
* Mandatory attributes:
*
* o association id - local handle to the SCTP association
*
* o buffer address - the location where the user message to be
* transmitted is stored;
*
* o byte count - The size of the user data in number of bytes;
*
* Optional attributes:
*
* o context - an optional 32 bit integer that will be carried in the
* sending failure notification to the ULP if the transportation of
* this User Message fails.
*
* o stream id - to indicate which stream to send the data on. If not
* specified, stream 0 will be used.
*
* o life time - specifies the life time of the user data. The user data
* will not be sent by SCTP after the life time expires. This
* parameter can be used to avoid efforts to transmit stale
* user messages. SCTP notifies the ULP if the data cannot be
* initiated to transport (i.e. sent to the destination via SCTP's
* send primitive) within the life time variable. However, the
* user data will be transmitted if SCTP has attempted to transmit a
* chunk before the life time expired.
*
* o destination transport address - specified as one of the destination
* transport addresses of the peer endpoint to which this packet
* should be sent. Whenever possible, SCTP should use this destination
* transport address for sending the packets, instead of the current
* primary path.
*
* o unorder flag - this flag, if present, indicates that the user
* would like the data delivered in an unordered fashion to the peer
* (i.e., the U flag is set to 1 on all DATA chunks carrying this
* message).
*
* o no-bundle flag - instructs SCTP not to bundle this user data with
* other outbound DATA chunks. SCTP MAY still bundle even when
* this flag is present, when faced with network congestion.
*
* o payload protocol-id - A 32 bit unsigned integer that is to be
* passed to the peer indicating the type of payload protocol data
* being transmitted. This value is passed as opaque data by SCTP.
*
* The return value is the disposition.
*/
sctp_disposition_t sctp_sf_do_prm_send(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_datamsg *msg = arg;
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_MSG, SCTP_DATAMSG(msg));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Process the SHUTDOWN primitive.
*
* Section: 10.1:
* C) Shutdown
*
* Format: SHUTDOWN(association id)
* -> result
*
* Gracefully closes an association. Any locally queued user data
* will be delivered to the peer. The association will be terminated only
* after the peer acknowledges all the SCTP packets sent. A success code
* will be returned on successful termination of the association. If
* attempting to terminate the association results in a failure, an error
* code shall be returned.
*
* Mandatory attributes:
*
* o association id - local handle to the SCTP association
*
* Optional attributes:
*
* None.
*
* The return value is the disposition.
*/
sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
int disposition;
/* From 9.2 Shutdown of an Association
* Upon receipt of the SHUTDOWN primitive from its upper
* layer, the endpoint enters SHUTDOWN-PENDING state and
* remains there until all outstanding data has been
* acknowledged by its peer. The endpoint accepts no new data
* from its upper layer, but retransmits data to the far end
* if necessary to fill gaps.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_SHUTDOWN_PENDING));
disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type,
arg, commands);
}
return disposition;
}
/*
* Process the ABORT primitive.
*
* Section: 10.1:
* C) Abort
*
* Format: Abort(association id [, cause code])
* -> result
*
* Ungracefully closes an association. Any locally queued user data
* will be discarded and an ABORT chunk is sent to the peer. A success code
* will be returned on successful abortion of the association. If
* attempting to abort the association results in a failure, an error
* code shall be returned.
*
* Mandatory attributes:
*
* o association id - local handle to the SCTP association
*
* Optional attributes:
*
* o cause code - reason of the abort to be passed to the peer
*
* The return value is the disposition.
*/
sctp_disposition_t sctp_sf_do_9_1_prm_abort(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* From 9.1 Abort of an Association
* Upon receipt of the ABORT primitive from its upper
* layer, the endpoint enters the CLOSED state and
* discards all outstanding data. The endpoint accepts
* no new data from its upper layer.
*/
struct sctp_chunk *abort = arg;
sctp_disposition_t retval;
retval = SCTP_DISPOSITION_CONSUME;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
/* Even if we can't send the ABORT due to low memory delete the
* TCB. This is a departure from our typical NOMEM handling.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
/* Delete the established association. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_USER_ABORT));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return retval;
}
/* We tried an illegal operation on an association which is closed. */
sctp_disposition_t sctp_sf_error_closed(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR, SCTP_ERROR(-EINVAL));
return SCTP_DISPOSITION_CONSUME;
}
/* We tried an illegal operation on an association which is shutting
* down.
*/
sctp_disposition_t sctp_sf_error_shutdown(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR,
SCTP_ERROR(-ESHUTDOWN));
return SCTP_DISPOSITION_CONSUME;
}
/*
* sctp_cookie_wait_prm_shutdown
*
* Section: 4 Note: 2
* Verification Tag:
* Inputs
* (endpoint, asoc)
*
* The RFC does not explicitly address this issue, but this is the route through the
* state table when someone issues a shutdown while in COOKIE_WAIT state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
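/* In COOKIE-WAIT no COOKIE ECHO has been sent yet, so the peer holds
 * no TCB for this association; there is nothing to shut down
 * gracefully. Stop T1 and tear the half-open association down locally.
 */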
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
}
/*
* sctp_cookie_echoed_prm_shutdown
*
* Section: 4 Note: 2
* Verification Tag:
* Inputs
* (endpoint, asoc)
*
* The RFC does not explicitly address this issue, but this is the route through the
* state table when someone issues a shutdown while in COOKIE_ECHOED state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg, sctp_cmd_seq_t *commands)
{
/* There is a single T1 timer, so we should be able to use
* the common function with the COOKIE-WAIT state.
*/
return sctp_sf_cookie_wait_prm_shutdown(ep, asoc, type, arg, commands);
}
/*
* sctp_sf_cookie_wait_prm_abort
*
* Section: 4 Note: 2
* Verification Tag:
* Inputs
* (endpoint, asoc)
*
* The RFC does not explicitly address this issue, but this is the route through the
* state table when someone issues an abort while in COOKIE_WAIT state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *abort = arg;
sctp_disposition_t retval;
/* Stop T1-init timer */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
retval = SCTP_DISPOSITION_CONSUME;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
/* Even if we can't send the ABORT due to low memory delete the
* TCB. This is a departure from our typical NOMEM handling.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNREFUSED));
/* Delete the established association. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_PERR(SCTP_ERROR_USER_ABORT));
return retval;
}
/*
* sctp_sf_cookie_echoed_prm_abort
*
* Section: 4 Note: 3
* Verification Tag:
* Inputs
* (endpoint, asoc)
*
* The RFC does not explicitly address this issue, but this is the route through the
* state table when someone issues an abort while in COOKIE_ECHOED state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* There is a single T1 timer, so we should be able to use
* the common function with the COOKIE-WAIT state.
*/
return sctp_sf_cookie_wait_prm_abort(ep, asoc, type, arg, commands);
}
/*
* sctp_sf_shutdown_pending_prm_abort
*
* Inputs
* (endpoint, asoc)
*
* The RFC does not explicitly address this issue, but this is the route through the
* state table when someone issues an abort while in SHUTDOWN-PENDING state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_shutdown_pending_prm_abort(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* Stop the T5-shutdown guard timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
return sctp_sf_do_9_1_prm_abort(ep, asoc, type, arg, commands);
}
/*
* sctp_sf_shutdown_sent_prm_abort
*
* Inputs
* (endpoint, asoc)
*
* The RFC does not explicitly address this issue, but this is the route through the
* state table when someone issues an abort while in SHUTDOWN-SENT state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_shutdown_sent_prm_abort(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* Stop the T2-shutdown timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
/* Stop the T5-shutdown guard timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
return sctp_sf_do_9_1_prm_abort(ep, asoc, type, arg, commands);
}
/*
* sctp_sf_shutdown_ack_sent_prm_abort
*
* Inputs
* (endpoint, asoc)
*
* The RFC does not explicitly address this issue, but this is the route through the
* state table when someone issues an abort while in SHUTDOWN-ACK-SENT state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* There is a single T2 timer, so we should be able to use the
* common function with the SHUTDOWN-SENT state.
*/
return sctp_sf_shutdown_sent_prm_abort(ep, asoc, type, arg, commands);
}
/*
* Process the REQUESTHEARTBEAT primitive
*
* 10.1 ULP-to-SCTP
* J) Request Heartbeat
*
* Format: REQUESTHEARTBEAT(association id, destination transport address)
*
* -> result
*
* Instructs the local endpoint to perform a HeartBeat on the specified
* destination transport address of the given association. The returned
* result should indicate whether the transmission of the HEARTBEAT
* chunk to the destination address is successful.
*
* Mandatory attributes:
*
* o association id - local handle to the SCTP association
*
* o destination transport address - the transport address of the
* association on which a heartbeat should be issued.
*/
sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
if (SCTP_DISPOSITION_NOMEM == sctp_sf_heartbeat(ep, asoc, type,
(struct sctp_transport *)arg, commands))
return SCTP_DISPOSITION_NOMEM;
/*
* RFC 2960 (bis), section 8.3
*
* D) Request an on-demand HEARTBEAT on a specific destination
* transport address of a given association.
*
* The endpoint should increment the respective error counter of
* the destination transport address each time a HEARTBEAT is sent
* to that address and not acknowledged within one RTO.
*
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
SCTP_TRANSPORT(arg));
return SCTP_DISPOSITION_CONSUME;
}
/*
* ADDIP Section 4.1 ASCONF Chunk Procedures
* When an endpoint has an ASCONF signaled change to be sent to the
* remote endpoint it should do A1 to A9
*/
sctp_disposition_t sctp_sf_do_prm_asconf(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk));
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Ignore the primitive event
*
* The return value is the disposition of the primitive.
*/
sctp_disposition_t sctp_sf_ignore_primitive(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
SCTP_DEBUG_PRINTK("Primitive type %d is ignored.\n", type.primitive);
return SCTP_DISPOSITION_DISCARD;
}
/***************************************************************************
* These are the state functions for the OTHER events.
***************************************************************************/
/*
* When the SCTP stack has no more user data to send or retransmit, this
* notification is given to the user. Also, at the time when a user app
* subscribes to this event, if there is no data to be sent or
* retransmitted, the stack will immediately send up this notification.
*/
sctp_disposition_t sctp_sf_do_no_pending_tsn(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_ulpevent *event;
event = sctp_ulpevent_make_sender_dry_event(asoc, GFP_ATOMIC);
if (!event)
return SCTP_DISPOSITION_NOMEM;
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(event));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Start the shutdown negotiation.
*
* From Section 9.2:
* Once all its outstanding data has been acknowledged, the endpoint
* shall send a SHUTDOWN chunk to its peer including in the Cumulative
* TSN Ack field the last sequential TSN it has received from the peer.
* It shall then start the T2-shutdown timer and enter the SHUTDOWN-SENT
* state. If the timer expires, the endpoint must re-send the SHUTDOWN
* with the updated last sequential TSN received from its peer.
*
* The return value is the disposition.
*/
sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *reply;
/* Once all its outstanding data has been acknowledged, the
* endpoint shall send a SHUTDOWN chunk to its peer including
* in the Cumulative TSN Ack field the last sequential TSN it
* has received from the peer.
*/
reply = sctp_make_shutdown(asoc, NULL);
if (!reply)
goto nomem;
/* Set the transport for the SHUTDOWN chunk and the timeout for the
* T2-shutdown timer.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
/* It shall then start the T2-shutdown timer */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
/* RFC 4960 Section 9.2
* The sender of the SHUTDOWN MAY also start an overall guard timer
* 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
if (asoc->autoclose)
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
/* and enter the SHUTDOWN-SENT state. */
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_SHUTDOWN_SENT));
/* sctp-implguide 2.10 Issues with Heartbeating and failover
*
* HEARTBEAT ... is discontinued after sending either SHUTDOWN
* or SHUTDOWN-ACK.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Generate a SHUTDOWN ACK now that everything is SACK'd.
*
* From Section 9.2:
*
* If it has no more outstanding DATA chunks, the SHUTDOWN receiver
* shall send a SHUTDOWN ACK and start a T2-shutdown timer of its own,
* entering the SHUTDOWN-ACK-SENT state. If the timer expires, the
* endpoint must re-send the SHUTDOWN ACK.
*
* The return value is the disposition.
*/
sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = (struct sctp_chunk *) arg;
struct sctp_chunk *reply;
/* There are 2 ways of getting here:
* 1) called in response to a SHUTDOWN chunk
* 2) called when SCTP_EVENT_NO_PENDING_TSN event is issued.
*
* For the case (2), the arg parameter is set to NULL. We need
* to check that we have a chunk before accessing its fields.
*/
if (chunk) {
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* Make sure that the SHUTDOWN chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
}
/* If it has no more outstanding DATA chunks, the SHUTDOWN receiver
* shall send a SHUTDOWN ACK ...
*/
reply = sctp_make_shutdown_ack(asoc, chunk);
if (!reply)
goto nomem;
/* Set the transport for the SHUTDOWN ACK chunk and the timeout for
* the T2-shutdown timer.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
/* and start/restart a T2-shutdown timer of its own, */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
if (asoc->autoclose)
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
/* Enter the SHUTDOWN-ACK-SENT state. */
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_SHUTDOWN_ACK_SENT));
/* sctp-implguide 2.10 Issues with Heartbeating and failover
*
* HEARTBEAT ... is discontinued after sending either SHUTDOWN
* or SHUTDOWN-ACK.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Ignore the event defined as other
*
* The return value is the disposition of the event.
*/
sctp_disposition_t sctp_sf_ignore_other(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
SCTP_DEBUG_PRINTK("The event other type %d is ignored\n", type.other);
return SCTP_DISPOSITION_DISCARD;
}
/************************************************************
* These are the state functions for handling timeout events.
************************************************************/
/*
* RTX Timeout
*
* Section: 6.3.3 Handle T3-rtx Expiration
*
* Whenever the retransmission timer T3-rtx expires for a destination
* address, do the following:
* [See below]
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_transport *transport = arg;
SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS);
if (asoc->overall_error_count >= asoc->max_retrans) {
if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
/*
* We are here likely because the receiver had its rwnd
* closed for a while and we have not been able to
* transmit the locally queued data within the maximum
* retransmission attempts limit. Start the T5
* shutdown guard timer to give the receiver one last
* chance and some additional time to recover before
* aborting.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
} else {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_DELETE_TCB;
}
}
/* E1) For the destination address for which the timer
* expires, adjust its ssthresh with rules defined in Section
* 7.2.3 and set the cwnd <- MTU.
*/
/* E2) For the destination address for which the timer
* expires, set RTO <- RTO * 2 ("back off the timer"). The
* maximum value discussed in rule C7 above (RTO.max) may be
* used to provide an upper bound to this doubling operation.
*/
/* E3) Determine how many of the earliest (i.e., lowest TSN)
* outstanding DATA chunks for the address for which the
* T3-rtx has expired will fit into a single packet, subject
* to the MTU constraint for the path corresponding to the
* destination transport address to which the retransmission
* is being sent (this may be different from the address for
* which the timer expires [see Section 6.4]). Call this
* value K. Bundle and retransmit those K DATA chunks in a
* single packet to the destination endpoint.
*
* Note: Any DATA chunks that were sent to the address for
* which the T3-rtx timer expired but did not fit in one MTU
* (rule E3 above), should be marked for retransmission and
* sent as soon as cwnd allows (normally when a SACK arrives).
*/
/* Do some failure management (Section 8.2). */
sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport));
/* NB: Rules E4 and F1 are implicit in R1. */
sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, SCTP_TRANSPORT(transport));
return SCTP_DISPOSITION_CONSUME;
}
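/*
 * Illustrative sketch, not part of the original file: rule E2 above
 * ("back off the timer") reduces to doubling the RTO and capping it at
 * RTO.max, assuming both values are kept in the same time unit:
 */
static inline unsigned long __maybe_unused t3_rtx_backoff_sketch(
	unsigned long rto, unsigned long rto_max)
{
	/* RTO <- min(RTO * 2, RTO.max) */
	return min(rto * 2, rto_max);
}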
/*
* Generate delayed SACK on timeout
*
* Section: 6.2 Acknowledgement on Reception of DATA Chunks
*
* The guidelines on delayed acknowledgement algorithm specified in
* Section 4.2 of [RFC2581] SHOULD be followed. Specifically, an
* acknowledgement SHOULD be generated for at least every second packet
* (not every second DATA chunk) received, and SHOULD be generated
* within 200 ms of the arrival of any unacknowledged DATA chunk. In
* some situations it may be beneficial for an SCTP transmitter to be
* more conservative than the algorithms detailed in this document
* allow. However, an SCTP transmitter MUST NOT be more aggressive than
* the following algorithms allow.
*/
sctp_disposition_t sctp_sf_do_6_2_sack(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
SCTP_INC_STATS(SCTP_MIB_DELAY_SACK_EXPIREDS);
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
return SCTP_DISPOSITION_CONSUME;
}
/*
* sctp_sf_t1_init_timer_expire
*
* Section: 4 Note: 2
* Verification Tag:
* Inputs
* (endpoint, asoc)
*
* RFC 2960 Section 4 Notes
* 2) If the T1-init timer expires, the endpoint MUST retransmit INIT
* and re-start the T1-init timer without changing state. This MUST
* be repeated up to 'Max.Init.Retransmits' times. After that, the
* endpoint MUST abort the initialization process and report the
* error to SCTP user.
*
* Outputs
* (timers, events)
*
*/
sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *repl = NULL;
struct sctp_bind_addr *bp;
int attempts = asoc->init_err_counter + 1;
SCTP_DEBUG_PRINTK("Timer T1 expired (INIT).\n");
SCTP_INC_STATS(SCTP_MIB_T1_INIT_EXPIREDS);
if (attempts <= asoc->max_init_attempts) {
bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0);
if (!repl)
return SCTP_DISPOSITION_NOMEM;
/* Choose transport for INIT. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
SCTP_CHUNK(repl));
/* Issue a side effect to do the needed accounting. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
} else {
SCTP_DEBUG_PRINTK("Giving up on INIT, attempts: %d"
" max_init_attempts: %d\n",
attempts, asoc->max_init_attempts);
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
return SCTP_DISPOSITION_DELETE_TCB;
}
return SCTP_DISPOSITION_CONSUME;
}
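/*
 * Illustrative sketch, not part of the original file: assuming the
 * error counter starts at zero and is bumped once per expiry by the
 * restart side effect, the give-up test above means the INIT goes out
 * 1 + max_init_attempts times in total (the original send plus the
 * timer-driven retransmissions) before SCTP_CMD_INIT_FAILED fires.
 * The predicate in isolation:
 */
static inline int __maybe_unused t1_gives_up_sketch(int init_err_counter,
						    int max_init_attempts)
{
	return init_err_counter + 1 > max_init_attempts;
}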
/*
* sctp_sf_t1_cookie_timer_expire
*
* Section: 4 Note: 3
* Verification Tag:
* Inputs
* (endpoint, asoc)
*
* RFC 2960 Section 4 Notes
* 3) If the T1-cookie timer expires, the endpoint MUST retransmit
* COOKIE ECHO and re-start the T1-cookie timer without changing
* state. This MUST be repeated up to 'Max.Init.Retransmits' times.
* After that, the endpoint MUST abort the initialization process and
* report the error to SCTP user.
*
* Outputs
* (timers, events)
*
*/
sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *repl = NULL;
int attempts = asoc->init_err_counter + 1;
SCTP_DEBUG_PRINTK("Timer T1 expired (COOKIE-ECHO).\n");
SCTP_INC_STATS(SCTP_MIB_T1_COOKIE_EXPIREDS);
if (attempts <= asoc->max_init_attempts) {
repl = sctp_make_cookie_echo(asoc, NULL);
if (!repl)
return SCTP_DISPOSITION_NOMEM;
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
SCTP_CHUNK(repl));
/* Issue a side effect to do the needed accounting. */
sctp_add_cmd_sf(commands, SCTP_CMD_COOKIEECHO_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
} else {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
return SCTP_DISPOSITION_DELETE_TCB;
}
return SCTP_DISPOSITION_CONSUME;
}
/* RFC2960 9.2 If the timer expires, the endpoint must re-send the SHUTDOWN
* with the updated last sequential TSN received from its peer.
*
* An endpoint should limit the number of retransmissions of the
* SHUTDOWN chunk to the protocol parameter 'Association.Max.Retrans'.
* If this threshold is exceeded the endpoint should destroy the TCB and
* MUST report the peer endpoint unreachable to the upper layer (and
* thus the association enters the CLOSED state). The reception of any
* packet from its peer (i.e. as the peer sends all of its queued DATA
* chunks) should clear the endpoint's retransmission count and restart
* the T2-Shutdown timer, giving its peer ample opportunity to transmit
* all of its queued DATA chunks that have not yet been sent.
*/
sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *reply = NULL;
SCTP_DEBUG_PRINTK("Timer T2 expired.\n");
SCTP_INC_STATS(SCTP_MIB_T2_SHUTDOWN_EXPIREDS);
((struct sctp_association *)asoc)->shutdown_retries++;
if (asoc->overall_error_count >= asoc->max_retrans) {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
/* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_DELETE_TCB;
}
switch (asoc->state) {
case SCTP_STATE_SHUTDOWN_SENT:
reply = sctp_make_shutdown(asoc, NULL);
break;
case SCTP_STATE_SHUTDOWN_ACK_SENT:
reply = sctp_make_shutdown_ack(asoc, NULL);
break;
default:
BUG();
break;
}
if (!reply)
goto nomem;
/* Do some failure management (Section 8.2).
* If the transport the SHUTDOWN was last sent to has been removed, don't
* do failure management.
*/
if (asoc->shutdown_last_sent_to)
sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE,
SCTP_TRANSPORT(asoc->shutdown_last_sent_to));
/* Set the transport for the SHUTDOWN/ACK chunk and the timeout for
* the T2-shutdown timer.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
/* Restart the T2-shutdown timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* ADDIP Section 4.1 ASCONF Chunk Procedures
* If the T4 RTO timer expires the endpoint should do B1 to B5
*/
sctp_disposition_t sctp_sf_t4_timer_expire(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = asoc->addip_last_asconf;
struct sctp_transport *transport = chunk->transport;
SCTP_INC_STATS(SCTP_MIB_T4_RTO_EXPIREDS);
/* ADDIP 4.1 B1) Increment the error counters and perform path failure
* detection on the appropriate destination address as defined in
* RFC2960 [5] section 8.1 and 8.2.
*/
if (transport)
sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE,
SCTP_TRANSPORT(transport));
/* Reconfig T4 timer and transport. */
sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk));
/* ADDIP 4.1 B2) Increment the association error counters and perform
* endpoint failure detection on the association as defined in
* RFC2960 [5] section 8.1 and 8.2.
* The association error counter is incremented in SCTP_CMD_STRIKE.
*/
if (asoc->overall_error_count >= asoc->max_retrans) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
/* ADDIP 4.1 B3) Back-off the destination address RTO value to which
* the ASCONF chunk was sent by doubling the RTO timer value.
* This is done in SCTP_CMD_STRIKE.
*/
/* ADDIP 4.1 B4) Re-transmit the ASCONF Chunk last sent and if possible
* choose an alternate destination address (please refer to RFC2960
* [5] section 6.4.1). An endpoint MUST NOT add new parameters to this
* chunk, it MUST be the same (including its serial number) as the last
* ASCONF sent.
*/
sctp_chunk_hold(asoc->addip_last_asconf);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(asoc->addip_last_asconf));
/* ADDIP 4.1 B5) Restart the T-4 RTO timer. Note that if a different
* destination is selected, then the RTO used will be that of the new
* destination address.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
return SCTP_DISPOSITION_CONSUME;
}
/* sctpimpguide-05 Section 2.12.2
* The sender of the SHUTDOWN MAY also start an overall guard timer
* 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
* At the expiration of this timer the sender SHOULD abort the association
* by sending an ABORT chunk.
*/
sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *reply = NULL;
SCTP_DEBUG_PRINTK("Timer T5 expired.\n");
SCTP_INC_STATS(SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS);
reply = sctp_make_abort(asoc, NULL, 0);
if (!reply)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_DELETE_TCB;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/* Handle expiration of AUTOCLOSE timer. When the autoclose timer expires,
* the association is automatically closed by starting the shutdown process.
* The work that needs to be done is the same as when SHUTDOWN is initiated
* by the user, so this routine looks the same as sctp_sf_do_9_2_prm_shutdown().
*/
sctp_disposition_t sctp_sf_autoclose_timer_expire(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
int disposition;
SCTP_INC_STATS(SCTP_MIB_AUTOCLOSE_EXPIREDS);
/* From 9.2 Shutdown of an Association
* Upon receipt of the SHUTDOWN primitive from its upper
* layer, the endpoint enters SHUTDOWN-PENDING state and
* remains there until all outstanding data has been
* acknowledged by its peer. The endpoint accepts no new data
* from its upper layer, but retransmits data to the far end
* if necessary to fill gaps.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_SHUTDOWN_PENDING));
disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type,
arg, commands);
}
return disposition;
}
/*****************************************************************************
* These are sa state functions which could apply to all types of events.
****************************************************************************/
/*
* This table entry is not implemented.
*
* Inputs
* (endpoint, asoc, chunk)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_not_impl(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
return SCTP_DISPOSITION_NOT_IMPL;
}
/*
* This table entry represents a bug.
*
* Inputs
* (endpoint, asoc, chunk)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_bug(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
return SCTP_DISPOSITION_BUG;
}
/*
* This table entry represents the firing of a timer in the wrong state.
* Since timer deletion cannot be guaranteed, a timer may end up firing
* when the association is in the wrong state. This event should
* be ignored, so as to prevent any rearming of the timer.
*
* Inputs
* (endpoint, asoc, chunk)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_timer_ignore(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
SCTP_DEBUG_PRINTK("Timer %d ignored.\n", type.chunk);
return SCTP_DISPOSITION_CONSUME;
}
/********************************************************************
* 2nd Level Abstractions
********************************************************************/
/* Pull the SACK chunk based on the SACK header. */
static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk)
{
struct sctp_sackhdr *sack;
unsigned int len;
__u16 num_blocks;
__u16 num_dup_tsns;
/* Protect ourselves from reading too far into
* the skb from a bogus sender.
*/
sack = (struct sctp_sackhdr *) chunk->skb->data;
num_blocks = ntohs(sack->num_gap_ack_blocks);
num_dup_tsns = ntohs(sack->num_dup_tsns);
len = sizeof(struct sctp_sackhdr);
len += (num_blocks + num_dup_tsns) * sizeof(__u32);
if (len > chunk->skb->len)
return NULL;
skb_pull(chunk->skb, len);
return sack;
}
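/*
 * Illustrative sketch, not part of the original file: the length check
 * above guards against a forged header claiming more gap-ack blocks or
 * duplicate TSNs than the skb actually carries. The expected on-wire
 * size in isolation (one __u32-sized entry per block and per dup TSN):
 */
static inline unsigned int __maybe_unused sack_expected_len_sketch(
	__u16 num_blocks, __u16 num_dup_tsns)
{
	return sizeof(struct sctp_sackhdr) +
	       (num_blocks + num_dup_tsns) * sizeof(__u32);
}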
/* Create an ABORT packet to be sent as a response, with the specified
* error causes.
*/
static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
const void *payload,
size_t paylen)
{
struct sctp_packet *packet;
struct sctp_chunk *abort;
packet = sctp_ootb_pkt_new(asoc, chunk);
if (packet) {
/* Make an ABORT.
* The T bit will be set if the asoc is NULL.
*/
abort = sctp_make_abort(asoc, chunk, paylen);
if (!abort) {
sctp_ootb_pkt_free(packet);
return NULL;
}
/* Reflect vtag if T-Bit is set */
if (sctp_test_T_bit(abort))
packet->vtag = ntohl(chunk->sctp_hdr->vtag);
/* Add specified error causes, i.e., payload, to the
* end of the chunk.
*/
sctp_addto_chunk(abort, paylen, payload);
/* Set the skb to the belonging sock for accounting. */
abort->skb->sk = ep->base.sk;
sctp_packet_append_chunk(packet, abort);
}
return packet;
}
/* Allocate a packet for responding in the OOTB conditions. */
static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc,
const struct sctp_chunk *chunk)
{
struct sctp_packet *packet;
struct sctp_transport *transport;
__u16 sport;
__u16 dport;
__u32 vtag;
/* Get the source and destination port from the inbound packet. */
sport = ntohs(chunk->sctp_hdr->dest);
dport = ntohs(chunk->sctp_hdr->source);
/* The V-tag is going to be the same as the inbound packet if no
* association exists, otherwise, use the peer's vtag.
*/
if (asoc) {
/* Special case the INIT-ACK as there is no peer's vtag
* yet.
*/
switch (chunk->chunk_hdr->type) {
case SCTP_CID_INIT_ACK:
{
sctp_initack_chunk_t *initack;
initack = (sctp_initack_chunk_t *)chunk->chunk_hdr;
vtag = ntohl(initack->init_hdr.init_tag);
break;
}
default:
vtag = asoc->peer.i.init_tag;
break;
}
} else {
/* Special case the INIT and stale COOKIE_ECHO as there is no
* vtag yet.
*/
switch (chunk->chunk_hdr->type) {
case SCTP_CID_INIT:
{
sctp_init_chunk_t *init;
init = (sctp_init_chunk_t *)chunk->chunk_hdr;
vtag = ntohl(init->init_hdr.init_tag);
break;
}
default:
vtag = ntohl(chunk->sctp_hdr->vtag);
break;
}
}
/* Make a transport for the bucket, Eliza... */
transport = sctp_transport_new(sctp_source(chunk), GFP_ATOMIC);
if (!transport)
goto nomem;
/* Cache a route for the transport with the chunk's destination as
* the source address.
*/
sctp_transport_route(transport, (union sctp_addr *)&chunk->dest,
sctp_sk(sctp_get_ctl_sock()));
packet = sctp_packet_init(&transport->packet, transport, sport, dport);
packet = sctp_packet_config(packet, vtag, 0);
return packet;
nomem:
return NULL;
}
/* Free the packet allocated earlier for responding in the OOTB condition. */
void sctp_ootb_pkt_free(struct sctp_packet *packet)
{
sctp_transport_free(packet->transport);
}
/* Send a stale cookie error when an invalid COOKIE ECHO chunk is found */
static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_chunk *err_chunk)
{
struct sctp_packet *packet;
if (err_chunk) {
packet = sctp_ootb_pkt_new(asoc, chunk);
if (packet) {
struct sctp_signed_cookie *cookie;
/* Override the OOTB vtag from the cookie. */
cookie = chunk->subh.cookie_hdr;
packet->vtag = cookie->c.peer_vtag;
/* Set the skb to the belonging sock for accounting. */
err_chunk->skb->sk = ep->base.sk;
sctp_packet_append_chunk(packet, err_chunk);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
} else
sctp_chunk_free(err_chunk);
}
}
/* Process a data chunk */
static int sctp_eat_data(const struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands)
{
sctp_datahdr_t *data_hdr;
struct sctp_chunk *err;
size_t datalen;
sctp_verb_t deliver;
int tmp;
__u32 tsn;
struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
struct sock *sk = asoc->base.sk;
u16 ssn;
u16 sid;
u8 ordered = 0;
data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
tsn = ntohl(data_hdr->tsn);
SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn);
/* ASSERT: Now skb->data is really the user data. */
/* Process ECN based congestion.
*
* Since the chunk structure is reused for all chunks within
* a packet, we use ecn_ce_done to track if we've already
* done CE processing for this packet.
*
* We need to do ECN processing even if we plan to discard the
* chunk later.
*/
if (!chunk->ecn_ce_done) {
struct sctp_af *af;
chunk->ecn_ce_done = 1;
af = sctp_get_af_specific(
ipver2af(ip_hdr(chunk->skb)->version));
if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) {
/* Do real work as a side effect. */
sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
SCTP_U32(tsn));
}
}
tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn);
if (tmp < 0) {
/* The TSN is too high--silently discard the chunk and
* count on it getting retransmitted later.
*/
return SCTP_IERROR_HIGH_TSN;
} else if (tmp > 0) {
/* This is a duplicate. Record it. */
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn));
return SCTP_IERROR_DUP_TSN;
}
/* This is a new TSN. */
/* Discard if there is no room in the receive window.
* Actually, allow a little bit of overflow (up to a MTU).
*/
datalen = ntohs(chunk->chunk_hdr->length);
datalen -= sizeof(sctp_data_chunk_t);
deliver = SCTP_CMD_CHUNK_ULP;
/* Think about partial delivery. */
if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
/* Even if we don't accept this chunk there is
* memory pressure.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
}
/* Spill over rwnd a little bit. Note: While allowed, this spill over
* seems a bit troublesome in that frag_point varies based on
* PMTU. In cases such as loopback, this might be a rather
* large spill over.
*/
if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over ||
(datalen > asoc->rwnd + asoc->frag_point))) {
/* If this is the next TSN, consider reneging to make
* room. Note: Playing nice with a confused sender. A
* malicious sender can still eat up all our buffer
* space and in the future we may want to detect and
* do more drastic reneging.
*/
if (sctp_tsnmap_has_gap(map) &&
(sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn);
deliver = SCTP_CMD_RENEGE;
} else {
SCTP_DEBUG_PRINTK("Discard tsn: %u len: %Zd, "
"rwnd: %d\n", tsn, datalen,
asoc->rwnd);
return SCTP_IERROR_IGNORE_TSN;
}
}
/*
* Also try to renege to limit our memory usage in the event that
* we are under memory pressure.
* If we can't renege, don't worry about it; the sk_rmem_schedule
* in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our
* memory usage too much.
*/
if (*sk->sk_prot_creator->memory_pressure) {
if (sctp_tsnmap_has_gap(map) &&
(sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
SCTP_DEBUG_PRINTK("Under Pressure! Reneging for tsn:%u\n", tsn);
deliver = SCTP_CMD_RENEGE;
}
}
/*
* Section 3.3.10.9 No User Data (9)
*
* Cause of error
* ---------------
* No User Data: This error cause is returned to the originator of a
* DATA chunk if a received DATA chunk has no user data.
*/
if (unlikely(0 == datalen)) {
err = sctp_make_abort_no_data(asoc, chunk, tsn);
if (err) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err));
}
/* We are going to ABORT, so we might as well stop
* processing the rest of the chunks in the packet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_NO_DATA));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_IERROR_NO_DATA;
}
chunk->data_accepted = 1;
/* Note: Some chunks may get undercounted (if we drop) or overcounted
* if we renege and the chunk arrives again.
*/
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS);
else {
SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS);
ordered = 1;
}
/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
*
* If an endpoint receives a DATA chunk with an invalid stream
* identifier, it shall acknowledge the reception of the DATA chunk
* following the normal procedure, immediately send an ERROR chunk
* with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
* and discard the DATA chunk.
*/
sid = ntohs(data_hdr->stream);
if (sid >= asoc->c.sinit_max_instreams) {
/* Mark tsn as received even though we drop it */
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
&data_hdr->stream,
sizeof(data_hdr->stream),
sizeof(u16));
if (err)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err));
return SCTP_IERROR_BAD_STREAM;
}
/* Check to see if the SSN is possible for this TSN.
* The biggest gap we can record is 4K wide. Since SSNs wrap
* at an unsigned short, there is no way an SSN can wrap and
* still belong to a valid TSN. We can simply check whether the current
* SSN is smaller than the next expected one. If it is, it wrapped
* and is invalid.
*/
ssn = ntohs(data_hdr->ssn);
if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->ssnmap->in, sid))) {
return SCTP_IERROR_PROTO_VIOLATION;
}
/* Send the data up to the user. Note: Schedule the
* SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
* chunk needs the updated rwnd.
*/
sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk));
return SCTP_IERROR_NO_ERROR;
}
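/*
 * Illustrative sketch, not part of the original file: both renege paths
 * above (rwnd exhaustion and memory pressure) only trigger when the
 * arriving TSN would plug the first hole in the peer's TSN map, i.e.:
 */
static inline int __maybe_unused tsn_fills_first_gap_sketch(
	struct sctp_tsnmap *map, __u32 tsn)
{
	return sctp_tsnmap_has_gap(map) &&
	       sctp_tsnmap_get_ctsn(map) + 1 == tsn;
}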
| gpl-2.0 |
01org/edison-linux | net/mac80211/ocb.c | 646 | 6949 | /*
* OCB mode implementation
*
* Copyright: (c) 2014 Czech Technical University in Prague
* (c) 2014 Volkswagen Group Research
* Author: Rostislav Lisovy <rostislav.lisovy@fel.cvut.cz>
* Funded by: Volkswagen Group Research
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#define IEEE80211_OCB_HOUSEKEEPING_INTERVAL (60 * HZ)
#define IEEE80211_OCB_PEER_INACTIVITY_LIMIT (240 * HZ)
#define IEEE80211_OCB_MAX_STA_ENTRIES 128
/**
* enum ocb_deferred_task_flags - mac80211 OCB deferred tasks
* @OCB_WORK_HOUSEKEEPING: run the periodic OCB housekeeping tasks
*
* These flags are used in the @wrkq_flags field of &struct ieee80211_if_ocb
*/
enum ocb_deferred_task_flags {
OCB_WORK_HOUSEKEEPING,
};
void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata,
const u8 *bssid, const u8 *addr,
u32 supp_rates)
{
struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_supported_band *sband;
enum nl80211_bss_scan_width scan_width;
struct sta_info *sta;
int band;
/* XXX: Consider removing the least recently used entry and
* allowing a new one to be added.
*/
if (local->num_sta >= IEEE80211_OCB_MAX_STA_ENTRIES) {
net_info_ratelimited("%s: No room for a new OCB STA entry %pM\n",
sdata->name, addr);
return;
}
ocb_dbg(sdata, "Adding new OCB station %pM\n", addr);
rcu_read_lock();
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
if (WARN_ON_ONCE(!chanctx_conf)) {
rcu_read_unlock();
return;
}
band = chanctx_conf->def.chan->band;
scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def);
rcu_read_unlock();
sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
if (!sta)
return;
sta->last_rx = jiffies;
/* Add only mandatory rates for now */
sband = local->hw.wiphy->bands[band];
sta->sta.supp_rates[band] =
ieee80211_mandatory_rates(sband, scan_width);
spin_lock(&ifocb->incomplete_lock);
list_add(&sta->list, &ifocb->incomplete_stations);
spin_unlock(&ifocb->incomplete_lock);
ieee80211_queue_work(&local->hw, &sdata->work);
}
static struct sta_info *ieee80211_ocb_finish_sta(struct sta_info *sta)
__acquires(RCU)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
u8 addr[ETH_ALEN];
memcpy(addr, sta->sta.addr, ETH_ALEN);
ocb_dbg(sdata, "Adding new IBSS station %pM (dev=%s)\n",
addr, sdata->name);
sta_info_move_state(sta, IEEE80211_STA_AUTH);
sta_info_move_state(sta, IEEE80211_STA_ASSOC);
sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
rate_control_rate_init(sta);
/* If it fails, maybe we raced another insertion? */
if (sta_info_insert_rcu(sta))
return sta_info_get(sdata, addr);
return sta;
}
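/*
 * Illustrative note, not part of the original file: OCB has no
 * auth/assoc handshake, so a discovered peer is promoted straight
 * through the sta_info state ladder (AUTH -> ASSOC -> AUTHORIZED)
 * before insertion. If sta_info_insert_rcu() fails, the entry has
 * already been freed by the insert path, so the lookup above returns
 * whichever station won the race.
 */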
static void ieee80211_ocb_housekeeping(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
ocb_dbg(sdata, "Running ocb housekeeping\n");
ieee80211_sta_expire(sdata, IEEE80211_OCB_PEER_INACTIVITY_LIMIT);
mod_timer(&ifocb->housekeeping_timer,
round_jiffies(jiffies + IEEE80211_OCB_HOUSEKEEPING_INTERVAL));
}
void ieee80211_ocb_work(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
struct sta_info *sta;
if (!ifocb->joined)
return;
sdata_lock(sdata);
spin_lock_bh(&ifocb->incomplete_lock);
while (!list_empty(&ifocb->incomplete_stations)) {
sta = list_first_entry(&ifocb->incomplete_stations,
struct sta_info, list);
list_del(&sta->list);
spin_unlock_bh(&ifocb->incomplete_lock);
ieee80211_ocb_finish_sta(sta);
rcu_read_unlock();
spin_lock_bh(&ifocb->incomplete_lock);
}
spin_unlock_bh(&ifocb->incomplete_lock);
if (test_and_clear_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags))
ieee80211_ocb_housekeeping(sdata);
sdata_unlock(sdata);
}
static void ieee80211_ocb_housekeeping_timer(unsigned long data)
{
struct ieee80211_sub_if_data *sdata = (void *)data;
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
set_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags);
ieee80211_queue_work(&local->hw, &sdata->work);
}
void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
setup_timer(&ifocb->housekeeping_timer,
ieee80211_ocb_housekeeping_timer,
(unsigned long)sdata);
INIT_LIST_HEAD(&ifocb->incomplete_stations);
spin_lock_init(&ifocb->incomplete_lock);
}
int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata,
struct ocb_setup *setup)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
u32 changed = BSS_CHANGED_OCB;
int err;
if (ifocb->joined)
return -EINVAL;
sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
sdata->smps_mode = IEEE80211_SMPS_OFF;
sdata->needed_rx_chains = sdata->local->rx_chains;
mutex_lock(&sdata->local->mtx);
err = ieee80211_vif_use_channel(sdata, &setup->chandef,
IEEE80211_CHANCTX_SHARED);
mutex_unlock(&sdata->local->mtx);
if (err)
return err;
ieee80211_bss_info_change_notify(sdata, changed);
ifocb->joined = true;
set_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags);
ieee80211_queue_work(&local->hw, &sdata->work);
netif_carrier_on(sdata->dev);
return 0;
}
int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
ifocb->joined = false;
sta_info_flush(sdata);
spin_lock_bh(&ifocb->incomplete_lock);
while (!list_empty(&ifocb->incomplete_stations)) {
sta = list_first_entry(&ifocb->incomplete_stations,
struct sta_info, list);
list_del(&sta->list);
spin_unlock_bh(&ifocb->incomplete_lock);
sta_info_free(local, sta);
spin_lock_bh(&ifocb->incomplete_lock);
}
spin_unlock_bh(&ifocb->incomplete_lock);
netif_carrier_off(sdata->dev);
clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_OCB);
mutex_lock(&sdata->local->mtx);
ieee80211_vif_release_channel(sdata);
mutex_unlock(&sdata->local->mtx);
skb_queue_purge(&sdata->skb_queue);
del_timer_sync(&sdata->u.ocb.housekeeping_timer);
/* If the timer fired while we waited for it, it will have
* requeued the work. Now the work will be running again
* but will not rearm the timer again because it checks
* whether we are connected to the network or not -- at this
* point we shouldn't be anymore.
*/
return 0;
}
| gpl-2.0 |
sfagmenos/ker | drivers/usb/usbip/vhci_rx.c | 1414 | 6365 | /*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#include <linux/kthread.h>
#include <linux/slab.h>
#include "usbip_common.h"
#include "vhci.h"
/* get URB from transmitted urb queue. caller must hold vdev->priv_lock */
struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum)
{
struct vhci_priv *priv, *tmp;
struct urb *urb = NULL;
int status;
list_for_each_entry_safe(priv, tmp, &vdev->priv_rx, list) {
if (priv->seqnum != seqnum)
continue;
urb = priv->urb;
status = urb->status;
usbip_dbg_vhci_rx("find urb %p vurb %p seqnum %u\n",
urb, priv, seqnum);
switch (status) {
case -ENOENT:
/* fall through */
case -ECONNRESET:
dev_info(&urb->dev->dev,
"urb %p was unlinked %ssynchronuously.\n", urb,
status == -ENOENT ? "" : "a");
break;
case -EINPROGRESS:
/* no info output */
break;
default:
dev_info(&urb->dev->dev,
"urb %p may be in a error, status %d\n", urb,
status);
}
list_del(&priv->list);
kfree(priv);
urb->hcpriv = NULL;
break;
}
return urb;
}
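/*
 * Illustrative sketch, not part of the original file: the walk above
 * relies on list_for_each_entry_safe() because the matching node is
 * list_del()ed and freed mid-iteration; the _safe variant caches the
 * next node first. The same pattern with a hypothetical item type:
 */
struct demo_item {
	u32 key;
	struct list_head list;
};

static void __maybe_unused demo_del_by_key(struct list_head *head, u32 key)
{
	struct demo_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, head, list) {
		if (item->key != key)
			continue;
		/* deleting 'item' is safe; 'tmp' already points at the next node */
		list_del(&item->list);
		kfree(item);
		break;
	}
}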
static void vhci_recv_ret_submit(struct vhci_device *vdev,
struct usbip_header *pdu)
{
struct usbip_device *ud = &vdev->ud;
struct urb *urb;
spin_lock(&vdev->priv_lock);
urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
spin_unlock(&vdev->priv_lock);
if (!urb) {
pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
pr_info("max seqnum %d\n",
atomic_read(&the_controller->seqnum));
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
return;
}
/* unpack the pdu to a urb */
usbip_pack_pdu(pdu, urb, USBIP_RET_SUBMIT, 0);
/* recv transfer buffer */
if (usbip_recv_xbuff(ud, urb) < 0)
return;
/* recv iso_packet_descriptor */
if (usbip_recv_iso(ud, urb) < 0)
return;
/* restore the padding in iso packets */
usbip_pad_iso(ud, urb);
if (usbip_dbg_flag_vhci_rx)
usbip_dump_urb(urb);
usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
spin_lock(&the_controller->lock);
usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
spin_unlock(&the_controller->lock);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
usbip_dbg_vhci_rx("Leave\n");
}
static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev,
struct usbip_header *pdu)
{
struct vhci_unlink *unlink, *tmp;
spin_lock(&vdev->priv_lock);
list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) {
pr_info("unlink->seqnum %lu\n", unlink->seqnum);
if (unlink->seqnum == pdu->base.seqnum) {
usbip_dbg_vhci_rx("found pending unlink, %lu\n",
unlink->seqnum);
list_del(&unlink->list);
spin_unlock(&vdev->priv_lock);
return unlink;
}
}
spin_unlock(&vdev->priv_lock);
return NULL;
}
static void vhci_recv_ret_unlink(struct vhci_device *vdev,
struct usbip_header *pdu)
{
struct vhci_unlink *unlink;
struct urb *urb;
usbip_dump_header(pdu);
unlink = dequeue_pending_unlink(vdev, pdu);
if (!unlink) {
pr_info("cannot find the pending unlink %u\n",
pdu->base.seqnum);
return;
}
spin_lock(&vdev->priv_lock);
urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
spin_unlock(&vdev->priv_lock);
if (!urb) {
/*
* We got the result of an unlink request, but it seems we
* already received the result of its submit and gave
* back the URB.
*/
pr_info("the urb (seqnum %d) was already given back\n",
pdu->base.seqnum);
} else {
usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
/* If unlink is successful, status is -ECONNRESET */
urb->status = pdu->u.ret_unlink.status;
pr_info("urb->status %d\n", urb->status);
spin_lock(&the_controller->lock);
usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
spin_unlock(&the_controller->lock);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
urb->status);
}
kfree(unlink);
}
static int vhci_priv_tx_empty(struct vhci_device *vdev)
{
int empty = 0;
spin_lock(&vdev->priv_lock);
empty = list_empty(&vdev->priv_rx);
spin_unlock(&vdev->priv_lock);
return empty;
}
/* recv a pdu */
static void vhci_rx_pdu(struct usbip_device *ud)
{
int ret;
struct usbip_header pdu;
struct vhci_device *vdev = container_of(ud, struct vhci_device, ud);
usbip_dbg_vhci_rx("Enter\n");
memset(&pdu, 0, sizeof(pdu));
/* receive a pdu header */
ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
if (ret < 0) {
if (ret == -ECONNRESET)
pr_info("connection reset by peer\n");
else if (ret == -EAGAIN) {
/* ignore if connection was idle */
if (vhci_priv_tx_empty(vdev))
return;
pr_info("connection timed out with pending urbs\n");
} else if (ret != -ERESTARTSYS)
pr_info("xmit failed %d\n", ret);
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
return;
}
if (ret == 0) {
pr_info("connection closed");
usbip_event_add(ud, VDEV_EVENT_DOWN);
return;
}
if (ret != sizeof(pdu)) {
pr_err("received pdu size is %d, should be %d\n", ret,
(unsigned int)sizeof(pdu));
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
return;
}
usbip_header_correct_endian(&pdu, 0);
if (usbip_dbg_flag_vhci_rx)
usbip_dump_header(&pdu);
switch (pdu.base.command) {
case USBIP_RET_SUBMIT:
vhci_recv_ret_submit(vdev, &pdu);
break;
case USBIP_RET_UNLINK:
vhci_recv_ret_unlink(vdev, &pdu);
break;
default:
/* NOT REACHED */
pr_err("unknown pdu %u\n", pdu.base.command);
usbip_dump_header(&pdu);
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
break;
}
}
int vhci_rx_loop(void *data)
{
struct usbip_device *ud = data;
while (!kthread_should_stop()) {
if (usbip_event_happened(ud))
break;
vhci_rx_pdu(ud);
}
return 0;
}
| gpl-2.0 |
codesnake/linux | drivers/mtd/tests/mtd_oobtest.c | 2182 | 17036 | /*
* Copyright (C) 2006-2008 Nokia Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; see the file COPYING. If not, write to the Free Software
* Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Test OOB read and write on MTD device.
*
* Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/div64.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/err.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/random.h>
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
static struct mtd_info *mtd;
static unsigned char *readbuf;
static unsigned char *writebuf;
static unsigned char *bbt;
static int ebcnt;
static int pgcnt;
static int errcnt;
static int use_offset;
static int use_len;
static int use_len_max;
static int vary_offset;
static struct rnd_state rnd_state;
static int erase_eraseblock(int ebnum)
{
int err;
struct erase_info ei;
loff_t addr = ebnum * mtd->erasesize;
memset(&ei, 0, sizeof(struct erase_info));
ei.mtd = mtd;
ei.addr = addr;
ei.len = mtd->erasesize;
err = mtd_erase(mtd, &ei);
if (err) {
pr_err("error %d while erasing EB %d\n", err, ebnum);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
pr_err("some erase error occurred at EB %d\n", ebnum);
return -EIO;
}
return 0;
}
static int erase_whole_device(void)
{
int err;
unsigned int i;
pr_info("erasing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = erase_eraseblock(i);
if (err)
return err;
cond_resched();
}
pr_info("erased %u eraseblocks\n", i);
return 0;
}
static void do_vary_offset(void)
{
use_len -= 1;
if (use_len < 1) {
use_offset += 1;
if (use_offset >= use_len_max)
use_offset = 0;
use_len = use_len_max - use_offset;
}
}
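/*
 * Illustrative sketch, not part of the original file: one full cycle of
 * do_vary_offset() with use_len_max == 4 walks the (offset, len) pairs
 * (0,4) (0,3) (0,2) (0,1) (1,3) (1,2) (1,1) (2,2) (2,1) (3,1) and then
 * wraps back to (0,4), covering every valid slice of the OOB area:
 */
static void __maybe_unused demo_vary_offset_walk(void)
{
	int off = 0, len = 4, max = 4, i;

	for (i = 0; i < 10; i++) {
		pr_info("pair %d: offset %d len %d\n", i, off, len);
		/* same update rule as do_vary_offset() */
		if (--len < 1) {
			if (++off >= max)
				off = 0;
			len = max - off;
		}
	}
}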
static int write_eraseblock(int ebnum)
{
int i;
struct mtd_oob_ops ops;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
prandom_bytes_state(&rnd_state, writebuf, use_len);
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = use_len;
ops.oobretlen = 0;
ops.ooboffs = use_offset;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
err = mtd_write_oob(mtd, addr, &ops);
if (err || ops.oobretlen != use_len) {
pr_err("error: writeoob failed at %#llx\n",
(long long)addr);
pr_err("error: use_len %d, use_offset %d\n",
use_len, use_offset);
errcnt += 1;
return err ? err : -1;
}
if (vary_offset)
do_vary_offset();
}
return err;
}
static int write_whole_device(void)
{
int err;
unsigned int i;
pr_info("writing OOBs of whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = write_eraseblock(i);
if (err)
return err;
if (i % 256 == 0)
pr_info("written up to eraseblock %u\n", i);
cond_resched();
}
pr_info("written %u eraseblocks\n", i);
return 0;
}
static int verify_eraseblock(int ebnum)
{
int i;
struct mtd_oob_ops ops;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
prandom_bytes_state(&rnd_state, writebuf, use_len);
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = use_len;
ops.oobretlen = 0;
ops.ooboffs = use_offset;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != use_len) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
errcnt += 1;
return err ? err : -1;
}
if (memcmp(readbuf, writebuf, use_len)) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
pr_err("error: too many errors\n");
return -1;
}
}
if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) {
int k;
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->ecclayout->oobavail;
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != mtd->ecclayout->oobavail) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
errcnt += 1;
return err ? err : -1;
}
if (memcmp(readbuf + use_offset, writebuf, use_len)) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
pr_err("error: too many errors\n");
return -1;
}
}
for (k = 0; k < use_offset; ++k)
if (readbuf[k] != 0xff) {
pr_err("error: verify 0xff "
"failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
pr_err("error: too "
"many errors\n");
return -1;
}
}
for (k = use_offset + use_len;
k < mtd->ecclayout->oobavail; ++k)
if (readbuf[k] != 0xff) {
pr_err("error: verify 0xff "
"failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
pr_err("error: too "
"many errors\n");
return -1;
}
}
}
if (vary_offset)
do_vary_offset();
}
return err;
}
static int verify_eraseblock_in_one_go(int ebnum)
{
struct mtd_oob_ops ops;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
size_t len = mtd->ecclayout->oobavail * pgcnt;
prandom_bytes_state(&rnd_state, writebuf, len);
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = len;
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != len) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
errcnt += 1;
return err ? err : -1;
}
if (memcmp(readbuf, writebuf, len)) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
pr_err("error: too many errors\n");
return -1;
}
}
return err;
}
static int verify_all_eraseblocks(void)
{
int err;
unsigned int i;
pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = verify_eraseblock(i);
if (err)
return err;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
pr_info("verified %u eraseblocks\n", i);
return 0;
}
static int is_block_bad(int ebnum)
{
int ret;
loff_t addr = ebnum * mtd->erasesize;
ret = mtd_block_isbad(mtd, addr);
if (ret)
pr_info("block %d is bad\n", ebnum);
return ret;
}
static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
bbt = kmalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
return 0;
}
static int __init mtd_oobtest_init(void)
{
int err = 0;
unsigned int i;
uint64_t tmp;
struct mtd_oob_ops ops;
loff_t addr = 0, addr0;
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
pr_info("Please specify a valid mtd-device via module parameter\n");
pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->type != MTD_NANDFLASH) {
pr_info("this test requires NAND flash\n");
goto out;
}
tmp = mtd->size;
do_div(tmp, mtd->erasesize);
ebcnt = tmp;
pgcnt = mtd->erasesize / mtd->writesize;
pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
mtd->writesize, ebcnt, pgcnt, mtd->oobsize);
err = -ENOMEM;
readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!readbuf) {
pr_err("error: cannot allocate memory\n");
goto out;
}
writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!writebuf) {
pr_err("error: cannot allocate memory\n");
goto out;
}
err = scan_for_bad_eraseblocks();
if (err)
goto out;
use_offset = 0;
use_len = mtd->ecclayout->oobavail;
use_len_max = mtd->ecclayout->oobavail;
vary_offset = 0;
/* First test: write all OOB, read it back and verify */
pr_info("test 1 of 5\n");
err = erase_whole_device();
if (err)
goto out;
prandom_seed_state(&rnd_state, 1);
err = write_whole_device();
if (err)
goto out;
prandom_seed_state(&rnd_state, 1);
err = verify_all_eraseblocks();
if (err)
goto out;
/*
* Second test: write all OOB, a block at a time, read it back and
* verify.
*/
pr_info("test 2 of 5\n");
err = erase_whole_device();
if (err)
goto out;
prandom_seed_state(&rnd_state, 3);
err = write_whole_device();
if (err)
goto out;
/* Check all eraseblocks */
prandom_seed_state(&rnd_state, 3);
pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = verify_eraseblock_in_one_go(i);
if (err)
goto out;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
pr_info("verified %u eraseblocks\n", i);
/*
* Third test: write OOB at varying offsets and lengths, read it back
* and verify.
*/
pr_info("test 3 of 5\n");
err = erase_whole_device();
if (err)
goto out;
/* Write all eraseblocks */
use_offset = 0;
use_len = mtd->ecclayout->oobavail;
use_len_max = mtd->ecclayout->oobavail;
vary_offset = 1;
prandom_seed_state(&rnd_state, 5);
err = write_whole_device();
if (err)
goto out;
/* Check all eraseblocks */
use_offset = 0;
use_len = mtd->ecclayout->oobavail;
use_len_max = mtd->ecclayout->oobavail;
vary_offset = 1;
prandom_seed_state(&rnd_state, 5);
err = verify_all_eraseblocks();
if (err)
goto out;
use_offset = 0;
use_len = mtd->ecclayout->oobavail;
use_len_max = mtd->ecclayout->oobavail;
vary_offset = 0;
/* Fourth test: try to write off end of device */
pr_info("test 4 of 5\n");
err = erase_whole_device();
if (err)
goto out;
addr0 = 0;
for (i = 0; i < ebcnt && bbt[i]; ++i)
addr0 += mtd->erasesize;
/* Attempt to write off end of OOB */
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = 1;
ops.oobretlen = 0;
ops.ooboffs = mtd->ecclayout->oobavail;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
pr_info("attempting to start write past end of OOB\n");
pr_info("an error is expected...\n");
err = mtd_write_oob(mtd, addr0, &ops);
if (err) {
pr_info("error occurred as expected\n");
err = 0;
} else {
pr_err("error: can write past end of OOB\n");
errcnt += 1;
}
/* Attempt to read off end of OOB */
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = 1;
ops.oobretlen = 0;
ops.ooboffs = mtd->ecclayout->oobavail;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
pr_info("attempting to start read past end of OOB\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, addr0, &ops);
if (err) {
pr_info("error occurred as expected\n");
err = 0;
} else {
pr_err("error: can read past end of OOB\n");
errcnt += 1;
}
if (bbt[ebcnt - 1])
pr_info("skipping end of device tests because last "
"block is bad\n");
else {
/* Attempt to write off end of device */
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->ecclayout->oobavail + 1;
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
pr_info("attempting to write past end of device\n");
pr_info("an error is expected...\n");
err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
pr_info("error occurred as expected\n");
err = 0;
} else {
pr_err("error: wrote past end of device\n");
errcnt += 1;
}
/* Attempt to read off end of device */
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->ecclayout->oobavail + 1;
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
pr_info("attempting to read past end of device\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
pr_info("error occurred as expected\n");
err = 0;
} else {
pr_err("error: read past end of device\n");
errcnt += 1;
}
err = erase_eraseblock(ebcnt - 1);
if (err)
goto out;
/* Attempt to write off end of device */
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->ecclayout->oobavail;
ops.oobretlen = 0;
ops.ooboffs = 1;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
pr_info("attempting to write past end of device\n");
pr_info("an error is expected...\n");
err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
pr_info("error occurred as expected\n");
err = 0;
} else {
pr_err("error: wrote past end of device\n");
errcnt += 1;
}
/* Attempt to read off end of device */
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->ecclayout->oobavail;
ops.oobretlen = 0;
ops.ooboffs = 1;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
pr_info("attempting to read past end of device\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
pr_info("error occurred as expected\n");
err = 0;
} else {
pr_err("error: read past end of device\n");
errcnt += 1;
}
}
/* Fifth test: write / read across block boundaries */
pr_info("test 5 of 5\n");
/* Erase all eraseblocks */
err = erase_whole_device();
if (err)
goto out;
/* Write all eraseblocks */
prandom_seed_state(&rnd_state, 11);
pr_info("writing OOBs of whole device\n");
for (i = 0; i < ebcnt - 1; ++i) {
int cnt = 2;
int pg;
size_t sz = mtd->ecclayout->oobavail;
if (bbt[i] || bbt[i + 1])
continue;
addr = (i + 1) * mtd->erasesize - mtd->writesize;
for (pg = 0; pg < cnt; ++pg) {
prandom_bytes_state(&rnd_state, writebuf, sz);
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = sz;
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
err = mtd_write_oob(mtd, addr, &ops);
if (err)
goto out;
if (i % 256 == 0)
pr_info("written up to eraseblock %u\n", i);
cond_resched();
addr += mtd->writesize;
}
}
pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
prandom_seed_state(&rnd_state, 11);
pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt - 1; ++i) {
if (bbt[i] || bbt[i + 1])
continue;
prandom_bytes_state(&rnd_state, writebuf,
mtd->ecclayout->oobavail * 2);
addr = (i + 1) * mtd->erasesize - mtd->writesize;
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->ecclayout->oobavail * 2;
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
if (err)
goto out;
if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
pr_err("error: too many errors\n");
goto out;
}
}
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
pr_info("verified %u eraseblocks\n", i);
pr_info("finished with %d errors\n", errcnt);
out:
kfree(bbt);
kfree(writebuf);
kfree(readbuf);
put_mtd_device(mtd);
if (err)
pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
module_init(mtd_oobtest_init);
static void __exit mtd_oobtest_exit(void)
{
return;
}
module_exit(mtd_oobtest_exit);
MODULE_DESCRIPTION("Out-of-band test module");
MODULE_AUTHOR("Adrian Hunter");
MODULE_LICENSE("GPL");
| gpl-2.0 |
vm03/android_kernel_asus_P024 | drivers/net/team/team_mode_random.c | 2182 | 1786 | /*
* drivers/net/team/team_mode_random.c - Random mode for team
* Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/reciprocal_div.h>
#include <linux/if_team.h>
static u32 random_N(unsigned int N)
{
return reciprocal_divide(prandom_u32(), N);
}
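/*
 * Illustrative sketch, not part of the original file: with this
 * kernel's reciprocal_divide(A, B) == (u32)(((u64)A * B) >> 32),
 * random_N() is a multiply-shift mapping of a uniform 32-bit value
 * into [0, N) that avoids a modulo operation:
 */
static u32 __maybe_unused random_N_sketch(unsigned int N)
{
	return (u32)(((u64)prandom_u32() * N) >> 32);
}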
static bool rnd_transmit(struct team *team, struct sk_buff *skb)
{
struct team_port *port;
int port_index;
port_index = random_N(team->en_port_count);
port = team_get_port_by_index_rcu(team, port_index);
if (unlikely(!port))
goto drop;
port = team_get_first_port_txable_rcu(team, port);
if (unlikely(!port))
goto drop;
if (team_dev_queue_xmit(team, port, skb))
return false;
return true;
drop:
dev_kfree_skb_any(skb);
return false;
}
static const struct team_mode_ops rnd_mode_ops = {
.transmit = rnd_transmit,
.port_enter = team_modeop_port_enter,
.port_change_dev_addr = team_modeop_port_change_dev_addr,
};
static const struct team_mode rnd_mode = {
.kind = "random",
.owner = THIS_MODULE,
.ops = &rnd_mode_ops,
};
static int __init rnd_init_module(void)
{
return team_mode_register(&rnd_mode);
}
static void __exit rnd_cleanup_module(void)
{
team_mode_unregister(&rnd_mode);
}
module_init(rnd_init_module);
module_exit(rnd_cleanup_module);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Random mode for team");
MODULE_ALIAS("team-mode-random");
| gpl-2.0 |
emwno/android_kernel_konaxx | drivers/usb/host/uhci-hub.c | 2438 | 11234 | /*
* Universal Host Controller Interface driver for USB.
*
* Maintainer: Alan Stern <stern@rowland.harvard.edu>
*
* (C) Copyright 1999 Linus Torvalds
* (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
* (C) Copyright 1999 Randy Dunlap
* (C) Copyright 1999 Georg Acher, acher@in.tum.de
* (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
* (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
* (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
*/
static const __u8 root_hub_hub_des[] =
{
0x09, /* __u8 bLength; */
0x29, /* __u8 bDescriptorType; Hub-descriptor */
0x02, /* __u8 bNbrPorts; */
0x0a, /* __u16 wHubCharacteristics; */
0x00, /* (per-port OC, no power switching) */
0x01, /* __u8 bPwrOn2pwrGood; 2ms */
0x00, /* __u8 bHubContrCurrent; 0 mA */
0x00, /* __u8 DeviceRemovable; *** 7 Ports max *** */
0xff /* __u8 PortPwrCtrlMask; *** 7 ports max *** */
};
#define UHCI_RH_MAXCHILD 7
/* must write as zeroes */
#define WZ_BITS (USBPORTSC_RES2 | USBPORTSC_RES3 | USBPORTSC_RES4)
/* status change bits: nonzero writes will clear */
#define RWC_BITS (USBPORTSC_OCC | USBPORTSC_PEC | USBPORTSC_CSC)
/* suspend/resume bits: port suspended or port resuming */
#define SUSPEND_BITS (USBPORTSC_SUSP | USBPORTSC_RD)
/* A port that either is connected or has a changed-bit set will prevent
* us from AUTO_STOPPING.
*/
static int any_ports_active(struct uhci_hcd *uhci)
{
int port;
for (port = 0; port < uhci->rh_numports; ++port) {
if ((uhci_readw(uhci, USBPORTSC1 + port * 2) &
(USBPORTSC_CCS | RWC_BITS)) ||
test_bit(port, &uhci->port_c_suspend))
return 1;
}
return 0;
}
static inline int get_hub_status_data(struct uhci_hcd *uhci, char *buf)
{
int port;
int mask = RWC_BITS;
/* Some boards (both VIA and Intel apparently) report bogus
* overcurrent indications, causing massive log spam unless
* we completely ignore them. This doesn't seem to be a problem
* with the chipset so much as with the way it is connected on
* the motherboard; if the overcurrent input is left to float
* then it may constantly register false positives. */
if (ignore_oc)
mask &= ~USBPORTSC_OCC;
*buf = 0;
for (port = 0; port < uhci->rh_numports; ++port) {
if ((uhci_readw(uhci, USBPORTSC1 + port * 2) & mask) ||
test_bit(port, &uhci->port_c_suspend))
*buf |= (1 << (port + 1));
}
return !!*buf;
}
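/*
 * Illustrative sketch, not part of the original file: the returned
 * buffer is the standard hub status-change bitmap, in which bit 0 is
 * reserved for the hub itself and bit (port + 1) flags a change on
 * 0-based port 'port', hence the shift above:
 */
static inline int __maybe_unused port_change_bit_sketch(int port)
{
	return 1 << (port + 1);
}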
#define OK(x) len = (x); break
#define CLR_RH_PORTSTAT(x) \
status = uhci_readw(uhci, port_addr); \
status &= ~(RWC_BITS|WZ_BITS); \
status &= ~(x); \
status |= RWC_BITS & (x); \
uhci_writew(uhci, status, port_addr)
#define SET_RH_PORTSTAT(x) \
status = uhci_readw(uhci, port_addr); \
status |= (x); \
status &= ~(RWC_BITS|WZ_BITS); \
uhci_writew(uhci, status, port_addr)
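/* Editor's note (illustrative, not in the original source): PORTSC mixes
 * ordinary read/write bits with RWC ("write 1 to clear") change bits, so
 * a naive read-modify-write would acknowledge pending change events by
 * accident. The macros above therefore mask RWC_BITS (and the must-be-
 * zero WZ_BITS) out of the value first, and CLR_RH_PORTSTAT ORs back in
 * only the RWC bits actually being cleared. E.g. to ack just CSC:
 *
 *	status = uhci_readw(uhci, port_addr);
 *	status &= ~(RWC_BITS | WZ_BITS);
 *	status |= USBPORTSC_CSC;
 *	uhci_writew(uhci, status, port_addr);
 */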
/* UHCI controllers don't automatically stop resume signalling after 20 msec,
* so we have to poll and check timeouts in order to take care of it.
*/
static void uhci_finish_suspend(struct uhci_hcd *uhci, int port,
unsigned long port_addr)
{
int status;
int i;
if (uhci_readw(uhci, port_addr) & SUSPEND_BITS) {
CLR_RH_PORTSTAT(SUSPEND_BITS);
if (test_bit(port, &uhci->resuming_ports))
set_bit(port, &uhci->port_c_suspend);
/* The controller won't actually turn off the RD bit until
* it has had a chance to send a low-speed EOP sequence,
* which is supposed to take 3 bit times (= 2 microseconds).
* Experiments show that some controllers take longer, so
* we'll poll for completion. */
for (i = 0; i < 10; ++i) {
if (!(uhci_readw(uhci, port_addr) & SUSPEND_BITS))
break;
udelay(1);
}
}
clear_bit(port, &uhci->resuming_ports);
}
/* Wait for the UHCI controller in HP's iLO2 server management chip.
* It can take up to 250 us to finish a reset and set the CSC bit.
*/
static void wait_for_HP(struct uhci_hcd *uhci, unsigned long port_addr)
{
int i;
for (i = 10; i < 250; i += 10) {
if (uhci_readw(uhci, port_addr) & USBPORTSC_CSC)
return;
udelay(10);
}
/* Log a warning? */
}
static void uhci_check_ports(struct uhci_hcd *uhci)
{
unsigned int port;
unsigned long port_addr;
int status;
for (port = 0; port < uhci->rh_numports; ++port) {
port_addr = USBPORTSC1 + 2 * port;
status = uhci_readw(uhci, port_addr);
if (unlikely(status & USBPORTSC_PR)) {
if (time_after_eq(jiffies, uhci->ports_timeout)) {
CLR_RH_PORTSTAT(USBPORTSC_PR);
udelay(10);
/* HP's server management chip requires
* a longer delay. */
if (uhci->wait_for_hp)
wait_for_HP(uhci, port_addr);
/* If the port was enabled before, turning
* reset on caused a port enable change.
* Turning reset off causes a port connect
* status change. Clear these changes. */
CLR_RH_PORTSTAT(USBPORTSC_CSC | USBPORTSC_PEC);
SET_RH_PORTSTAT(USBPORTSC_PE);
}
}
if (unlikely(status & USBPORTSC_RD)) {
if (!test_bit(port, &uhci->resuming_ports)) {
/* Port received a wakeup request */
set_bit(port, &uhci->resuming_ports);
uhci->ports_timeout = jiffies +
msecs_to_jiffies(25);
/* Make sure we see the port again
* after the resuming period is over. */
mod_timer(&uhci_to_hcd(uhci)->rh_timer,
uhci->ports_timeout);
} else if (time_after_eq(jiffies,
uhci->ports_timeout)) {
uhci_finish_suspend(uhci, port, port_addr);
}
}
}
}
static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
unsigned long flags;
int status = 0;
spin_lock_irqsave(&uhci->lock, flags);
uhci_scan_schedule(uhci);
if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead)
goto done;
uhci_check_ports(uhci);
status = get_hub_status_data(uhci, buf);
switch (uhci->rh_state) {
case UHCI_RH_SUSPENDING:
case UHCI_RH_SUSPENDED:
/* if port change, ask to be resumed */
if (status || uhci->resuming_ports)
usb_hcd_resume_root_hub(hcd);
break;
case UHCI_RH_AUTO_STOPPED:
/* if port change, auto start */
if (status)
wakeup_rh(uhci);
break;
case UHCI_RH_RUNNING:
/* are any devices attached? */
if (!any_ports_active(uhci)) {
uhci->rh_state = UHCI_RH_RUNNING_NODEVS;
uhci->auto_stop_time = jiffies + HZ;
}
break;
case UHCI_RH_RUNNING_NODEVS:
/* auto-stop if nothing connected for 1 second */
if (any_ports_active(uhci))
uhci->rh_state = UHCI_RH_RUNNING;
else if (time_after_eq(jiffies, uhci->auto_stop_time))
suspend_rh(uhci, UHCI_RH_AUTO_STOPPED);
break;
default:
break;
}
done:
spin_unlock_irqrestore(&uhci->lock, flags);
return status;
}
/* size of returned buffer is part of USB spec */
static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
int status, lstatus, retval = 0, len = 0;
unsigned int port = wIndex - 1;
unsigned long port_addr = USBPORTSC1 + 2 * port;
u16 wPortChange, wPortStatus;
unsigned long flags;
if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead)
return -ETIMEDOUT;
spin_lock_irqsave(&uhci->lock, flags);
switch (typeReq) {
case GetHubStatus:
*(__le32 *)buf = cpu_to_le32(0);
OK(4); /* hub power */
case GetPortStatus:
if (port >= uhci->rh_numports)
goto err;
uhci_check_ports(uhci);
status = uhci_readw(uhci, port_addr);
/* Intel controllers report the OverCurrent bit active on.
* VIA controllers report it active off, so we'll adjust the
* bit value. (It's not standardized in the UHCI spec.)
*/
if (uhci->oc_low)
status ^= USBPORTSC_OC;
/* UHCI doesn't support C_RESET (always false) */
wPortChange = lstatus = 0;
if (status & USBPORTSC_CSC)
wPortChange |= USB_PORT_STAT_C_CONNECTION;
if (status & USBPORTSC_PEC)
wPortChange |= USB_PORT_STAT_C_ENABLE;
if ((status & USBPORTSC_OCC) && !ignore_oc)
wPortChange |= USB_PORT_STAT_C_OVERCURRENT;
if (test_bit(port, &uhci->port_c_suspend)) {
wPortChange |= USB_PORT_STAT_C_SUSPEND;
lstatus |= 1;
}
if (test_bit(port, &uhci->resuming_ports))
lstatus |= 4;
/* UHCI has no power switching (always on) */
wPortStatus = USB_PORT_STAT_POWER;
if (status & USBPORTSC_CCS)
wPortStatus |= USB_PORT_STAT_CONNECTION;
if (status & USBPORTSC_PE) {
wPortStatus |= USB_PORT_STAT_ENABLE;
if (status & SUSPEND_BITS)
wPortStatus |= USB_PORT_STAT_SUSPEND;
}
if (status & USBPORTSC_OC)
wPortStatus |= USB_PORT_STAT_OVERCURRENT;
if (status & USBPORTSC_PR)
wPortStatus |= USB_PORT_STAT_RESET;
if (status & USBPORTSC_LSDA)
wPortStatus |= USB_PORT_STAT_LOW_SPEED;
if (wPortChange)
dev_dbg(uhci_dev(uhci), "port %d portsc %04x,%02x\n",
wIndex, status, lstatus);
*(__le16 *)buf = cpu_to_le16(wPortStatus);
*(__le16 *)(buf + 2) = cpu_to_le16(wPortChange);
OK(4);
case SetHubFeature: /* We don't implement these */
case ClearHubFeature:
switch (wValue) {
case C_HUB_OVER_CURRENT:
case C_HUB_LOCAL_POWER:
OK(0);
default:
goto err;
}
break;
case SetPortFeature:
if (port >= uhci->rh_numports)
goto err;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
SET_RH_PORTSTAT(USBPORTSC_SUSP);
OK(0);
case USB_PORT_FEAT_RESET:
SET_RH_PORTSTAT(USBPORTSC_PR);
/* Reset terminates Resume signalling */
uhci_finish_suspend(uhci, port, port_addr);
/* USB v2.0 7.1.7.5 */
uhci->ports_timeout = jiffies + msecs_to_jiffies(50);
OK(0);
case USB_PORT_FEAT_POWER:
/* UHCI has no power switching */
OK(0);
default:
goto err;
}
break;
case ClearPortFeature:
if (port >= uhci->rh_numports)
goto err;
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
CLR_RH_PORTSTAT(USBPORTSC_PE);
/* Disable terminates Resume signalling */
uhci_finish_suspend(uhci, port, port_addr);
OK(0);
case USB_PORT_FEAT_C_ENABLE:
CLR_RH_PORTSTAT(USBPORTSC_PEC);
OK(0);
case USB_PORT_FEAT_SUSPEND:
if (!(uhci_readw(uhci, port_addr) & USBPORTSC_SUSP)) {
/* Make certain the port isn't suspended */
uhci_finish_suspend(uhci, port, port_addr);
} else if (!test_and_set_bit(port,
&uhci->resuming_ports)) {
SET_RH_PORTSTAT(USBPORTSC_RD);
/* The controller won't allow RD to be set
* if the port is disabled. When this happens
* just skip the Resume signalling.
*/
if (!(uhci_readw(uhci, port_addr) &
USBPORTSC_RD))
uhci_finish_suspend(uhci, port,
port_addr);
else
/* USB v2.0 7.1.7.7 */
uhci->ports_timeout = jiffies +
msecs_to_jiffies(20);
}
OK(0);
case USB_PORT_FEAT_C_SUSPEND:
clear_bit(port, &uhci->port_c_suspend);
OK(0);
case USB_PORT_FEAT_POWER:
/* UHCI has no power switching */
goto err;
case USB_PORT_FEAT_C_CONNECTION:
CLR_RH_PORTSTAT(USBPORTSC_CSC);
OK(0);
case USB_PORT_FEAT_C_OVER_CURRENT:
CLR_RH_PORTSTAT(USBPORTSC_OCC);
OK(0);
case USB_PORT_FEAT_C_RESET:
/* this driver won't report these */
OK(0);
default:
goto err;
}
break;
case GetHubDescriptor:
len = min_t(unsigned int, sizeof(root_hub_hub_des), wLength);
memcpy(buf, root_hub_hub_des, len);
if (len > 2)
buf[2] = uhci->rh_numports;
OK(len);
default:
err:
retval = -EPIPE;
}
spin_unlock_irqrestore(&uhci->lock, flags);
return retval;
}
| gpl-2.0 |
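get_hub_status_data() above packs per-port change indications into a bitmap in which bit 0 is reserved for the hub itself and bit (port + 1) flags each port. Below is a self-contained sketch of that packing; it is illustrative rather than driver code, and the port_changed inputs stand in for the PORTSC register reads.

/* Standalone sketch (illustrative, not the driver itself): packing
 * root-hub status-change flags the way get_hub_status_data() does,
 * with bit 0 reserved for the hub and bit (port + 1) for each port. */
#include <stdint.h>
#include <stdio.h>

static int pack_hub_status(const int port_changed[], int nports,
			   uint8_t *buf)
{
	*buf = 0;
	for (int port = 0; port < nports; ++port)
		if (port_changed[port])
			*buf |= (uint8_t)(1u << (port + 1));
	return *buf != 0;	/* nonzero => something changed */
}

int main(void)
{
	int changed[2] = {0, 1};	/* port 2 has a pending change */
	uint8_t buf;
	printf("any change: %d, bitmap: 0x%02x\n",
	       pack_hub_status(changed, 2, &buf), buf);	/* bitmap 0x04 */
	return 0;
}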
TaichiN/android_kernel_samsung_tuna | net/netfilter/ipvs/ip_vs_proto_tcp.c | 2950 | 19645 | /*
* ip_vs_proto_tcp.c: TCP load balancing support for IPVS
*
* Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
* Julian Anastasov <ja@ssi.bg>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Changes: Hans Schillstrom <hans.schillstrom@ericsson.com>
*
* Network name space (netns) aware.
* Global data moved to netns, i.e. struct netns_ipvs
* tcp_timeouts table has copy per netns in a hash table per
* protocol ip_vs_proto_data and is handled by netns
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/ip.h>
#include <linux/tcp.h> /* for tcphdr */
#include <net/ip.h>
#include <net/tcp.h> /* for csum_tcpudp_magic */
#include <net/ip6_checksum.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/ip_vs.h>
static int
tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp)
{
struct net *net;
struct ip_vs_service *svc;
struct tcphdr _tcph, *th;
struct ip_vs_iphdr iph;
ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
th = skb_header_pointer(skb, iph.len, sizeof(_tcph), &_tcph);
if (th == NULL) {
*verdict = NF_DROP;
return 0;
}
net = skb_net(skb);
/* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */
if (th->syn &&
(svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
&iph.daddr, th->dest))) {
int ignored;
if (ip_vs_todrop(net_ipvs(net))) {
/*
* It seems that we are very loaded.
* We have to drop this packet :(
*/
ip_vs_service_put(svc);
*verdict = NF_DROP;
return 0;
}
/*
* Let the virtual server select a real server for the
* incoming connection, and create a connection entry.
*/
*cpp = ip_vs_schedule(svc, skb, pd, &ignored);
if (!*cpp && ignored <= 0) {
if (!ignored)
*verdict = ip_vs_leave(svc, skb, pd);
else {
ip_vs_service_put(svc);
*verdict = NF_DROP;
}
return 0;
}
ip_vs_service_put(svc);
}
/* NF_ACCEPT */
return 1;
}
static inline void
tcp_fast_csum_update(int af, struct tcphdr *tcph,
const union nf_inet_addr *oldip,
const union nf_inet_addr *newip,
__be16 oldport, __be16 newport)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
tcph->check =
csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
ip_vs_check_diff2(oldport, newport,
~csum_unfold(tcph->check))));
else
#endif
tcph->check =
csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
ip_vs_check_diff2(oldport, newport,
~csum_unfold(tcph->check))));
}
static inline void
tcp_partial_csum_update(int af, struct tcphdr *tcph,
const union nf_inet_addr *oldip,
const union nf_inet_addr *newip,
__be16 oldlen, __be16 newlen)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
tcph->check =
~csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
ip_vs_check_diff2(oldlen, newlen,
csum_unfold(tcph->check))));
else
#endif
tcph->check =
~csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
ip_vs_check_diff2(oldlen, newlen,
csum_unfold(tcph->check))));
}
static int
tcp_snat_handler(struct sk_buff *skb,
struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
{
struct tcphdr *tcph;
unsigned int tcphoff;
int oldlen;
int payload_csum = 0;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
tcphoff = sizeof(struct ipv6hdr);
else
#endif
tcphoff = ip_hdrlen(skb);
oldlen = skb->len - tcphoff;
/* csum_check requires unshared skb */
if (!skb_make_writable(skb, tcphoff+sizeof(*tcph)))
return 0;
if (unlikely(cp->app != NULL)) {
int ret;
/* Some checks before mangling */
if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
return 0;
/* Call application helper if needed */
if (!(ret = ip_vs_app_pkt_out(cp, skb)))
return 0;
/* ret=2: csum update is needed after payload mangling */
if (ret == 1)
oldlen = skb->len - tcphoff;
else
payload_csum = 1;
}
tcph = (void *)skb_network_header(skb) + tcphoff;
tcph->source = cp->vport;
/* Adjust TCP checksums */
if (skb->ip_summed == CHECKSUM_PARTIAL) {
tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
htons(oldlen),
htons(skb->len - tcphoff));
} else if (!payload_csum) {
/* Only port and addr are changed, do fast csum update */
tcp_fast_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
cp->dport, cp->vport);
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = (cp->app && pp->csum_check) ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
} else {
/* full checksum calculation */
tcph->check = 0;
skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
tcph->check = csum_ipv6_magic(&cp->vaddr.in6,
&cp->caddr.in6,
skb->len - tcphoff,
cp->protocol, skb->csum);
else
#endif
tcph->check = csum_tcpudp_magic(cp->vaddr.ip,
cp->caddr.ip,
skb->len - tcphoff,
cp->protocol,
skb->csum);
skb->ip_summed = CHECKSUM_UNNECESSARY;
IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
pp->name, tcph->check,
(char*)&(tcph->check) - (char*)tcph);
}
return 1;
}
static int
tcp_dnat_handler(struct sk_buff *skb,
struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
{
struct tcphdr *tcph;
unsigned int tcphoff;
int oldlen;
int payload_csum = 0;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
tcphoff = sizeof(struct ipv6hdr);
else
#endif
tcphoff = ip_hdrlen(skb);
oldlen = skb->len - tcphoff;
/* csum_check requires unshared skb */
if (!skb_make_writable(skb, tcphoff+sizeof(*tcph)))
return 0;
if (unlikely(cp->app != NULL)) {
int ret;
/* Some checks before mangling */
if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
return 0;
/*
* Attempt ip_vs_app call.
* It will fix ip_vs_conn and iph ack_seq stuff
*/
if (!(ret = ip_vs_app_pkt_in(cp, skb)))
return 0;
/* ret=2: csum update is needed after payload mangling */
if (ret == 1)
oldlen = skb->len - tcphoff;
else
payload_csum = 1;
}
tcph = (void *)skb_network_header(skb) + tcphoff;
tcph->dest = cp->dport;
/*
* Adjust TCP checksums
*/
if (skb->ip_summed == CHECKSUM_PARTIAL) {
tcp_partial_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr,
htons(oldlen),
htons(skb->len - tcphoff));
} else if (!payload_csum) {
/* Only port and addr are changed, do fast csum update */
tcp_fast_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr,
cp->vport, cp->dport);
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = (cp->app && pp->csum_check) ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
} else {
/* full checksum calculation */
tcph->check = 0;
skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
tcph->check = csum_ipv6_magic(&cp->caddr.in6,
&cp->daddr.in6,
skb->len - tcphoff,
cp->protocol, skb->csum);
else
#endif
tcph->check = csum_tcpudp_magic(cp->caddr.ip,
cp->daddr.ip,
skb->len - tcphoff,
cp->protocol,
skb->csum);
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
return 1;
}
static int
tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
{
unsigned int tcphoff;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
tcphoff = sizeof(struct ipv6hdr);
else
#endif
tcphoff = ip_hdrlen(skb);
switch (skb->ip_summed) {
case CHECKSUM_NONE:
skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
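/* fall through: verify the full checksum just computed */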
case CHECKSUM_COMPLETE:
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
skb->len - tcphoff,
ipv6_hdr(skb)->nexthdr,
skb->csum)) {
IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
"Failed checksum for");
return 0;
}
} else
#endif
if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr,
skb->len - tcphoff,
ip_hdr(skb)->protocol,
skb->csum)) {
IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
"Failed checksum for");
return 0;
}
break;
default:
/* No need to checksum. */
break;
}
return 1;
}
#define TCP_DIR_INPUT 0
#define TCP_DIR_OUTPUT 4
#define TCP_DIR_INPUT_ONLY 8
static const int tcp_state_off[IP_VS_DIR_LAST] = {
[IP_VS_DIR_INPUT] = TCP_DIR_INPUT,
[IP_VS_DIR_OUTPUT] = TCP_DIR_OUTPUT,
[IP_VS_DIR_INPUT_ONLY] = TCP_DIR_INPUT_ONLY,
};
/*
* Timeout table[state]
*/
static const int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
[IP_VS_TCP_S_NONE] = 2*HZ,
[IP_VS_TCP_S_ESTABLISHED] = 15*60*HZ,
[IP_VS_TCP_S_SYN_SENT] = 2*60*HZ,
[IP_VS_TCP_S_SYN_RECV] = 1*60*HZ,
[IP_VS_TCP_S_FIN_WAIT] = 2*60*HZ,
[IP_VS_TCP_S_TIME_WAIT] = 2*60*HZ,
[IP_VS_TCP_S_CLOSE] = 10*HZ,
[IP_VS_TCP_S_CLOSE_WAIT] = 60*HZ,
[IP_VS_TCP_S_LAST_ACK] = 30*HZ,
[IP_VS_TCP_S_LISTEN] = 2*60*HZ,
[IP_VS_TCP_S_SYNACK] = 120*HZ,
[IP_VS_TCP_S_LAST] = 2*HZ,
};
static const char *const tcp_state_name_table[IP_VS_TCP_S_LAST+1] = {
[IP_VS_TCP_S_NONE] = "NONE",
[IP_VS_TCP_S_ESTABLISHED] = "ESTABLISHED",
[IP_VS_TCP_S_SYN_SENT] = "SYN_SENT",
[IP_VS_TCP_S_SYN_RECV] = "SYN_RECV",
[IP_VS_TCP_S_FIN_WAIT] = "FIN_WAIT",
[IP_VS_TCP_S_TIME_WAIT] = "TIME_WAIT",
[IP_VS_TCP_S_CLOSE] = "CLOSE",
[IP_VS_TCP_S_CLOSE_WAIT] = "CLOSE_WAIT",
[IP_VS_TCP_S_LAST_ACK] = "LAST_ACK",
[IP_VS_TCP_S_LISTEN] = "LISTEN",
[IP_VS_TCP_S_SYNACK] = "SYNACK",
[IP_VS_TCP_S_LAST] = "BUG!",
};
#define sNO IP_VS_TCP_S_NONE
#define sES IP_VS_TCP_S_ESTABLISHED
#define sSS IP_VS_TCP_S_SYN_SENT
#define sSR IP_VS_TCP_S_SYN_RECV
#define sFW IP_VS_TCP_S_FIN_WAIT
#define sTW IP_VS_TCP_S_TIME_WAIT
#define sCL IP_VS_TCP_S_CLOSE
#define sCW IP_VS_TCP_S_CLOSE_WAIT
#define sLA IP_VS_TCP_S_LAST_ACK
#define sLI IP_VS_TCP_S_LISTEN
#define sSA IP_VS_TCP_S_SYNACK
struct tcp_states_t {
int next_state[IP_VS_TCP_S_LAST];
};
static const char * tcp_state_name(int state)
{
if (state >= IP_VS_TCP_S_LAST)
return "ERR!";
return tcp_state_name_table[state] ? tcp_state_name_table[state] : "?";
}
static struct tcp_states_t tcp_states [] = {
/* INPUT */
/* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }},
/*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sTW }},
/*ack*/ {{sCL, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sSR }},
/* OUTPUT */
/* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSS, sES, sSS, sSR, sSS, sSS, sSS, sSS, sSS, sLI, sSR }},
/*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW }},
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES }},
/*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL }},
/* INPUT-ONLY */
/* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }},
/*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }},
/*ack*/ {{sCL, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
};
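/* Editor's note (illustrative, not in the original source): a transition
 * is looked up as
 *	table[tcp_state_off[direction] + tcp_state_idx(th)].next_state[cp->state]
 * e.g. a SYN (row index 0) arriving on IP_VS_DIR_INPUT (offset 0) while
 * the connection is in sNO selects tcp_states[0].next_state[sNO] == sSR,
 * i.e. NONE -> SYN_RECV. */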
static struct tcp_states_t tcp_states_dos [] = {
/* INPUT */
/* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSA }},
/*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sSA }},
/*ack*/ {{sCL, sES, sSS, sSR, sFW, sTW, sCL, sCW, sCL, sLI, sSA }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
/* OUTPUT */
/* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSS, sES, sSS, sSA, sSS, sSS, sSS, sSS, sSS, sLI, sSA }},
/*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW }},
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES }},
/*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL }},
/* INPUT-ONLY */
/* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSA, sES, sES, sSR, sSA, sSA, sSA, sSA, sSA, sSA, sSA }},
/*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }},
/*ack*/ {{sCL, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
};
static void tcp_timeout_change(struct ip_vs_proto_data *pd, int flags)
{
int on = (flags & 1); /* secure_tcp */
/*
** FIXME: change secure_tcp to independent sysctl var
** or make it per-service or per-app because it is valid
** for most if not for all of the applications. Something
** like "capabilities" (flags) for each object.
*/
pd->tcp_state_table = (on ? tcp_states_dos : tcp_states);
}
static inline int tcp_state_idx(struct tcphdr *th)
{
if (th->rst)
return 3;
if (th->syn)
return 0;
if (th->fin)
return 1;
if (th->ack)
return 2;
return -1;
}
static inline void
set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
int direction, struct tcphdr *th)
{
int state_idx;
int new_state = IP_VS_TCP_S_CLOSE;
int state_off = tcp_state_off[direction];
/*
* Update state offset to INPUT_ONLY if necessary
* or delete NO_OUTPUT flag if output packet detected
*/
if (cp->flags & IP_VS_CONN_F_NOOUTPUT) {
if (state_off == TCP_DIR_OUTPUT)
cp->flags &= ~IP_VS_CONN_F_NOOUTPUT;
else
state_off = TCP_DIR_INPUT_ONLY;
}
if ((state_idx = tcp_state_idx(th)) < 0) {
IP_VS_DBG(8, "tcp_state_idx=%d!!!\n", state_idx);
goto tcp_state_out;
}
new_state =
pd->tcp_state_table[state_off+state_idx].next_state[cp->state];
tcp_state_out:
if (new_state != cp->state) {
struct ip_vs_dest *dest = cp->dest;
IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] %s:%d->"
"%s:%d state: %s->%s conn->refcnt:%d\n",
pd->pp->name,
((state_off == TCP_DIR_OUTPUT) ?
"output " : "input "),
th->syn ? 'S' : '.',
th->fin ? 'F' : '.',
th->ack ? 'A' : '.',
th->rst ? 'R' : '.',
IP_VS_DBG_ADDR(cp->af, &cp->daddr),
ntohs(cp->dport),
IP_VS_DBG_ADDR(cp->af, &cp->caddr),
ntohs(cp->cport),
tcp_state_name(cp->state),
tcp_state_name(new_state),
atomic_read(&cp->refcnt));
if (dest) {
if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
(new_state != IP_VS_TCP_S_ESTABLISHED)) {
atomic_dec(&dest->activeconns);
atomic_inc(&dest->inactconns);
cp->flags |= IP_VS_CONN_F_INACTIVE;
} else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
(new_state == IP_VS_TCP_S_ESTABLISHED)) {
atomic_inc(&dest->activeconns);
atomic_dec(&dest->inactconns);
cp->flags &= ~IP_VS_CONN_F_INACTIVE;
}
}
}
if (likely(pd))
cp->timeout = pd->timeout_table[cp->state = new_state];
else /* What to do ? */
cp->timeout = tcp_timeouts[cp->state = new_state];
}
/*
* Handle state transitions
*/
static int
tcp_state_transition(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
struct ip_vs_proto_data *pd)
{
struct tcphdr _tcph, *th;
#ifdef CONFIG_IP_VS_IPV6
int ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
#else
int ihl = ip_hdrlen(skb);
#endif
th = skb_header_pointer(skb, ihl, sizeof(_tcph), &_tcph);
if (th == NULL)
return 0;
spin_lock(&cp->lock);
set_tcp_state(pd, cp, direction, th);
spin_unlock(&cp->lock);
return 1;
}
static inline __u16 tcp_app_hashkey(__be16 port)
{
return (((__force u16)port >> TCP_APP_TAB_BITS) ^ (__force u16)port)
& TCP_APP_TAB_MASK;
}
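/* Editor's note (worked example, not in the original source; assumes
 * TCP_APP_TAB_BITS == 4 and TCP_APP_TAB_MASK == 0xf as in mainline): on
 * a little-endian host the FTP control port htons(21) is the raw u16
 * 0x1500, so hash = ((0x1500 >> 4) ^ 0x1500) & 0xf
 *               = (0x0150 ^ 0x1500) & 0xf = 0. */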
static int tcp_register_app(struct net *net, struct ip_vs_app *inc)
{
struct ip_vs_app *i;
__u16 hash;
__be16 port = inc->port;
int ret = 0;
struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
hash = tcp_app_hashkey(port);
spin_lock_bh(&ipvs->tcp_app_lock);
list_for_each_entry(i, &ipvs->tcp_apps[hash], p_list) {
if (i->port == port) {
ret = -EEXIST;
goto out;
}
}
list_add(&inc->p_list, &ipvs->tcp_apps[hash]);
atomic_inc(&pd->appcnt);
out:
spin_unlock_bh(&ipvs->tcp_app_lock);
return ret;
}
static void
tcp_unregister_app(struct net *net, struct ip_vs_app *inc)
{
struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
spin_lock_bh(&ipvs->tcp_app_lock);
atomic_dec(&pd->appcnt);
list_del(&inc->p_list);
spin_unlock_bh(&ipvs->tcp_app_lock);
}
static int
tcp_app_conn_bind(struct ip_vs_conn *cp)
{
struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
int hash;
struct ip_vs_app *inc;
int result = 0;
/* Default binding: bind app only for NAT */
if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
return 0;
/* Lookup application incarnations and bind the right one */
hash = tcp_app_hashkey(cp->vport);
spin_lock(&ipvs->tcp_app_lock);
list_for_each_entry(inc, &ipvs->tcp_apps[hash], p_list) {
if (inc->port == cp->vport) {
if (unlikely(!ip_vs_app_inc_get(inc)))
break;
spin_unlock(&ipvs->tcp_app_lock);
IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
"%s:%u to app %s on port %u\n",
__func__,
IP_VS_DBG_ADDR(cp->af, &cp->caddr),
ntohs(cp->cport),
IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
ntohs(cp->vport),
inc->name, ntohs(inc->port));
cp->app = inc;
if (inc->init_conn)
result = inc->init_conn(inc, cp);
goto out;
}
}
spin_unlock(&ipvs->tcp_app_lock);
out:
return result;
}
/*
* Set LISTEN timeout. (ip_vs_conn_put will setup timer)
*/
void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp)
{
struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
spin_lock(&cp->lock);
cp->state = IP_VS_TCP_S_LISTEN;
cp->timeout = (pd ? pd->timeout_table[IP_VS_TCP_S_LISTEN]
: tcp_timeouts[IP_VS_TCP_S_LISTEN]);
spin_unlock(&cp->lock);
}
/* ---------------------------------------------
* timeouts are netns-related now.
* ---------------------------------------------
*/
static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
{
struct netns_ipvs *ipvs = net_ipvs(net);
ip_vs_init_hash_table(ipvs->tcp_apps, TCP_APP_TAB_SIZE);
spin_lock_init(&ipvs->tcp_app_lock);
pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
sizeof(tcp_timeouts));
pd->tcp_state_table = tcp_states;
}
static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
{
kfree(pd->timeout_table);
}
struct ip_vs_protocol ip_vs_protocol_tcp = {
.name = "TCP",
.protocol = IPPROTO_TCP,
.num_states = IP_VS_TCP_S_LAST,
.dont_defrag = 0,
.init = NULL,
.exit = NULL,
.init_netns = __ip_vs_tcp_init,
.exit_netns = __ip_vs_tcp_exit,
.register_app = tcp_register_app,
.unregister_app = tcp_unregister_app,
.conn_schedule = tcp_conn_schedule,
.conn_in_get = ip_vs_conn_in_get_proto,
.conn_out_get = ip_vs_conn_out_get_proto,
.snat_handler = tcp_snat_handler,
.dnat_handler = tcp_dnat_handler,
.csum_check = tcp_csum_check,
.state_name = tcp_state_name,
.state_transition = tcp_state_transition,
.app_conn_bind = tcp_app_conn_bind,
.debug_packet = ip_vs_tcpudp_debug_packet,
.timeout_change = tcp_timeout_change,
};
| gpl-2.0 |
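tcp_fast_csum_update() above folds the address and port deltas into the existing TCP checksum instead of recomputing it over the whole payload. Below is a standalone sketch of the underlying RFC 1624 incremental update for a single 16-bit field; it is illustrative, and the 0xb861 starting checksum and the port values are made up.

/* Standalone sketch (illustrative, not IPVS code): incrementally update
 * a 16-bit one's-complement checksum when one 16-bit field changes,
 * per RFC 1624: HC' = ~(~HC + ~m + m'). This is the idea behind the
 * ip_vs_check_diff{2,4,16} fast-csum-update path above. */
#include <stdint.h>
#include <stdio.h>

static uint16_t csum_update16(uint16_t check, uint16_t old, uint16_t new_)
{
	uint32_t sum = (uint16_t)~check;
	sum += (uint16_t)~old;
	sum += new_;
	/* fold carries back into the low 16 bits */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint16_t check = 0xb861;	/* some pre-existing checksum */
	uint16_t updated = csum_update16(check, 0x0050, 0x1f90); /* 80 -> 8080 */
	printf("old 0x%04x -> new 0x%04x\n", check, updated);
	return 0;
}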
morogoku/MoRoKernel-I9300-4.4.4 | net/netfilter/ipvs/ip_vs_proto_udp.c | 2950 | 12585 | /*
* ip_vs_proto_udp.c: UDP load balancing support for IPVS
*
* Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
* Julian Anastasov <ja@ssi.bg>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Changes: Hans Schillstrom <hans.schillstrom@ericsson.com>
* Network name space (netns) aware.
*
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/udp.h>
#include <net/ip_vs.h>
#include <net/ip.h>
#include <net/ip6_checksum.h>
static int
udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp)
{
struct net *net;
struct ip_vs_service *svc;
struct udphdr _udph, *uh;
struct ip_vs_iphdr iph;
ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
uh = skb_header_pointer(skb, iph.len, sizeof(_udph), &_udph);
if (uh == NULL) {
*verdict = NF_DROP;
return 0;
}
net = skb_net(skb);
svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
&iph.daddr, uh->dest);
if (svc) {
int ignored;
if (ip_vs_todrop(net_ipvs(net))) {
/*
* It seems that we are very loaded.
* We have to drop this packet :(
*/
ip_vs_service_put(svc);
*verdict = NF_DROP;
return 0;
}
/*
* Let the virtual server select a real server for the
* incoming connection, and create a connection entry.
*/
*cpp = ip_vs_schedule(svc, skb, pd, &ignored);
if (!*cpp && ignored <= 0) {
if (!ignored)
*verdict = ip_vs_leave(svc, skb, pd);
else {
ip_vs_service_put(svc);
*verdict = NF_DROP;
}
return 0;
}
ip_vs_service_put(svc);
}
/* NF_ACCEPT */
return 1;
}
static inline void
udp_fast_csum_update(int af, struct udphdr *uhdr,
const union nf_inet_addr *oldip,
const union nf_inet_addr *newip,
__be16 oldport, __be16 newport)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
uhdr->check =
csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
ip_vs_check_diff2(oldport, newport,
~csum_unfold(uhdr->check))));
else
#endif
uhdr->check =
csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
ip_vs_check_diff2(oldport, newport,
~csum_unfold(uhdr->check))));
if (!uhdr->check)
uhdr->check = CSUM_MANGLED_0;
}
static inline void
udp_partial_csum_update(int af, struct udphdr *uhdr,
const union nf_inet_addr *oldip,
const union nf_inet_addr *newip,
__be16 oldlen, __be16 newlen)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
uhdr->check =
~csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
ip_vs_check_diff2(oldlen, newlen,
csum_unfold(uhdr->check))));
else
#endif
uhdr->check =
~csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
ip_vs_check_diff2(oldlen, newlen,
csum_unfold(uhdr->check))));
}
static int
udp_snat_handler(struct sk_buff *skb,
struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
{
struct udphdr *udph;
unsigned int udphoff;
int oldlen;
int payload_csum = 0;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
udphoff = sizeof(struct ipv6hdr);
else
#endif
udphoff = ip_hdrlen(skb);
oldlen = skb->len - udphoff;
/* csum_check requires unshared skb */
if (!skb_make_writable(skb, udphoff+sizeof(*udph)))
return 0;
if (unlikely(cp->app != NULL)) {
int ret;
/* Some checks before mangling */
if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
return 0;
/*
* Call application helper if needed
*/
if (!(ret = ip_vs_app_pkt_out(cp, skb)))
return 0;
/* ret=2: csum update is needed after payload mangling */
if (ret == 1)
oldlen = skb->len - udphoff;
else
payload_csum = 1;
}
udph = (void *)skb_network_header(skb) + udphoff;
udph->source = cp->vport;
/*
* Adjust UDP checksums
*/
if (skb->ip_summed == CHECKSUM_PARTIAL) {
udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
htons(oldlen),
htons(skb->len - udphoff));
} else if (!payload_csum && (udph->check != 0)) {
/* Only port and addr are changed, do fast csum update */
udp_fast_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
cp->dport, cp->vport);
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = (cp->app && pp->csum_check) ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
} else {
/* full checksum calculation */
udph->check = 0;
skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
udph->check = csum_ipv6_magic(&cp->vaddr.in6,
&cp->caddr.in6,
skb->len - udphoff,
cp->protocol, skb->csum);
else
#endif
udph->check = csum_tcpudp_magic(cp->vaddr.ip,
cp->caddr.ip,
skb->len - udphoff,
cp->protocol,
skb->csum);
if (udph->check == 0)
udph->check = CSUM_MANGLED_0;
skb->ip_summed = CHECKSUM_UNNECESSARY;
IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
pp->name, udph->check,
(char*)&(udph->check) - (char*)udph);
}
return 1;
}
static int
udp_dnat_handler(struct sk_buff *skb,
struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
{
struct udphdr *udph;
unsigned int udphoff;
int oldlen;
int payload_csum = 0;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
udphoff = sizeof(struct ipv6hdr);
else
#endif
udphoff = ip_hdrlen(skb);
oldlen = skb->len - udphoff;
/* csum_check requires unshared skb */
if (!skb_make_writable(skb, udphoff+sizeof(*udph)))
return 0;
if (unlikely(cp->app != NULL)) {
int ret;
/* Some checks before mangling */
if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
return 0;
/*
* Attempt ip_vs_app call.
* It will fix ip_vs_conn
*/
if (!(ret = ip_vs_app_pkt_in(cp, skb)))
return 0;
/* ret=2: csum update is needed after payload mangling */
if (ret == 1)
oldlen = skb->len - udphoff;
else
payload_csum = 1;
}
udph = (void *)skb_network_header(skb) + udphoff;
udph->dest = cp->dport;
/*
* Adjust UDP checksums
*/
if (skb->ip_summed == CHECKSUM_PARTIAL) {
udp_partial_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr,
htons(oldlen),
htons(skb->len - udphoff));
} else if (!payload_csum && (udph->check != 0)) {
/* Only port and addr are changed, do fast csum update */
udp_fast_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr,
cp->vport, cp->dport);
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = (cp->app && pp->csum_check) ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
} else {
/* full checksum calculation */
udph->check = 0;
skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
udph->check = csum_ipv6_magic(&cp->caddr.in6,
&cp->daddr.in6,
skb->len - udphoff,
cp->protocol, skb->csum);
else
#endif
udph->check = csum_tcpudp_magic(cp->caddr.ip,
cp->daddr.ip,
skb->len - udphoff,
cp->protocol,
skb->csum);
if (udph->check == 0)
udph->check = CSUM_MANGLED_0;
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
return 1;
}
static int
udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
{
struct udphdr _udph, *uh;
unsigned int udphoff;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
udphoff = sizeof(struct ipv6hdr);
else
#endif
udphoff = ip_hdrlen(skb);
uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph);
if (uh == NULL)
return 0;
if (uh->check != 0) {
switch (skb->ip_summed) {
case CHECKSUM_NONE:
skb->csum = skb_checksum(skb, udphoff,
skb->len - udphoff, 0);
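/* fall through: verify the full checksum just computed */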
case CHECKSUM_COMPLETE:
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
skb->len - udphoff,
ipv6_hdr(skb)->nexthdr,
skb->csum)) {
IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
"Failed checksum for");
return 0;
}
} else
#endif
if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr,
skb->len - udphoff,
ip_hdr(skb)->protocol,
skb->csum)) {
IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
"Failed checksum for");
return 0;
}
break;
default:
/* No need to checksum. */
break;
}
}
return 1;
}
static inline __u16 udp_app_hashkey(__be16 port)
{
return (((__force u16)port >> UDP_APP_TAB_BITS) ^ (__force u16)port)
& UDP_APP_TAB_MASK;
}
static int udp_register_app(struct net *net, struct ip_vs_app *inc)
{
struct ip_vs_app *i;
__u16 hash;
__be16 port = inc->port;
int ret = 0;
struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
hash = udp_app_hashkey(port);
spin_lock_bh(&ipvs->udp_app_lock);
list_for_each_entry(i, &ipvs->udp_apps[hash], p_list) {
if (i->port == port) {
ret = -EEXIST;
goto out;
}
}
list_add(&inc->p_list, &ipvs->udp_apps[hash]);
atomic_inc(&pd->appcnt);
out:
spin_unlock_bh(&ipvs->udp_app_lock);
return ret;
}
static void
udp_unregister_app(struct net *net, struct ip_vs_app *inc)
{
struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
struct netns_ipvs *ipvs = net_ipvs(net);
spin_lock_bh(&ipvs->udp_app_lock);
atomic_dec(&pd->appcnt);
list_del(&inc->p_list);
spin_unlock_bh(&ipvs->udp_app_lock);
}
static int udp_app_conn_bind(struct ip_vs_conn *cp)
{
struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
int hash;
struct ip_vs_app *inc;
int result = 0;
/* Default binding: bind app only for NAT */
if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
return 0;
/* Lookup application incarnations and bind the right one */
hash = udp_app_hashkey(cp->vport);
spin_lock(&ipvs->udp_app_lock);
list_for_each_entry(inc, &ipvs->udp_apps[hash], p_list) {
if (inc->port == cp->vport) {
if (unlikely(!ip_vs_app_inc_get(inc)))
break;
spin_unlock(&ipvs->udp_app_lock);
IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
"%s:%u to app %s on port %u\n",
__func__,
IP_VS_DBG_ADDR(cp->af, &cp->caddr),
ntohs(cp->cport),
IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
ntohs(cp->vport),
inc->name, ntohs(inc->port));
cp->app = inc;
if (inc->init_conn)
result = inc->init_conn(inc, cp);
goto out;
}
}
spin_unlock(&ipvs->udp_app_lock);
out:
return result;
}
static const int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
[IP_VS_UDP_S_NORMAL] = 5*60*HZ,
[IP_VS_UDP_S_LAST] = 2*HZ,
};
static const char *const udp_state_name_table[IP_VS_UDP_S_LAST+1] = {
[IP_VS_UDP_S_NORMAL] = "UDP",
[IP_VS_UDP_S_LAST] = "BUG!",
};
static const char * udp_state_name(int state)
{
if (state >= IP_VS_UDP_S_LAST)
return "ERR!";
return udp_state_name_table[state] ? udp_state_name_table[state] : "?";
}
static int
udp_state_transition(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
struct ip_vs_proto_data *pd)
{
if (unlikely(!pd)) {
pr_err("UDP no ns data\n");
return 0;
}
cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
return 1;
}
static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
{
struct netns_ipvs *ipvs = net_ipvs(net);
ip_vs_init_hash_table(ipvs->udp_apps, UDP_APP_TAB_SIZE);
spin_lock_init(&ipvs->udp_app_lock);
pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
sizeof(udp_timeouts));
}
static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
{
kfree(pd->timeout_table);
}
struct ip_vs_protocol ip_vs_protocol_udp = {
.name = "UDP",
.protocol = IPPROTO_UDP,
.num_states = IP_VS_UDP_S_LAST,
.dont_defrag = 0,
.init = NULL,
.exit = NULL,
.init_netns = __udp_init,
.exit_netns = __udp_exit,
.conn_schedule = udp_conn_schedule,
.conn_in_get = ip_vs_conn_in_get_proto,
.conn_out_get = ip_vs_conn_out_get_proto,
.snat_handler = udp_snat_handler,
.dnat_handler = udp_dnat_handler,
.csum_check = udp_csum_check,
.state_transition = udp_state_transition,
.state_name = udp_state_name,
.register_app = udp_register_app,
.unregister_app = udp_unregister_app,
.app_conn_bind = udp_app_conn_bind,
.debug_packet = ip_vs_tcpudp_debug_packet,
.timeout_change = NULL,
};
| gpl-2.0 |
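The UDP snat/dnat handlers above map a computed checksum of zero to CSUM_MANGLED_0, because an on-the-wire UDP checksum field of 0 means "checksum disabled" for IPv4. Below is a tiny sketch of that rule; it is illustrative, and the constant mirrors the kernel's value but is redefined locally.

/* Standalone sketch (illustrative): in one's-complement arithmetic
 * 0x0000 and 0xffff are both "zero", but a wire value of 0 in the UDP
 * checksum field means "no checksum" (IPv4), so a real computed zero
 * must be transmitted as 0xffff. */
#include <stdint.h>
#include <stdio.h>

#define CSUM_MANGLED_0 ((uint16_t)0xffff)

static uint16_t udp_wire_csum(uint16_t computed)
{
	return computed == 0 ? CSUM_MANGLED_0 : computed;
}

int main(void)
{
	printf("0x0000 -> 0x%04x\n", udp_wire_csum(0x0000));	/* 0xffff */
	printf("0xb861 -> 0x%04x\n", udp_wire_csum(0xb861));	/* unchanged */
	return 0;
}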
Scorpio92/LG_D618_kernel | arch/unicore32/mm/init.c | 3718 | 12947 | /*
* linux/arch/unicore32/mm/init.c
*
* Copyright (C) 2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <mach/map.h>
#include "mm.h"
static unsigned long phys_initrd_start __initdata = 0x01000000;
static unsigned long phys_initrd_size __initdata = SZ_8M;
static int __init early_initrd(char *p)
{
unsigned long start, size;
char *endp;
start = memparse(p, &endp);
if (*endp == ',') {
size = memparse(endp + 1, NULL);
phys_initrd_start = start;
phys_initrd_size = size;
}
return 0;
}
early_param("initrd", early_initrd);
/*
* This keeps memory configuration data used by a couple of memory
* initialization functions, as well as show_mem() for the skipping
* of holes in the memory map. It is populated by uc32_add_memory().
*/
struct meminfo meminfo;
void show_mem(unsigned int filter)
{
int free = 0, total = 0, reserved = 0;
int shared = 0, cached = 0, slab = 0, i;
struct meminfo *mi = &meminfo;
printk(KERN_DEFAULT "Mem-info:\n");
show_free_areas(filter);
for_each_bank(i, mi) {
struct membank *bank = &mi->bank[i];
unsigned int pfn1, pfn2;
struct page *page, *end;
pfn1 = bank_pfn_start(bank);
pfn2 = bank_pfn_end(bank);
page = pfn_to_page(pfn1);
end = pfn_to_page(pfn2 - 1) + 1;
do {
total++;
if (PageReserved(page))
reserved++;
else if (PageSwapCache(page))
cached++;
else if (PageSlab(page))
slab++;
else if (!page_count(page))
free++;
else
shared += page_count(page) - 1;
page++;
} while (page < end);
}
printk(KERN_DEFAULT "%d pages of RAM\n", total);
printk(KERN_DEFAULT "%d free pages\n", free);
printk(KERN_DEFAULT "%d reserved pages\n", reserved);
printk(KERN_DEFAULT "%d slab pages\n", slab);
printk(KERN_DEFAULT "%d pages shared\n", shared);
printk(KERN_DEFAULT "%d pages swap cached\n", cached);
}
static void __init find_limits(unsigned long *min, unsigned long *max_low,
unsigned long *max_high)
{
struct meminfo *mi = &meminfo;
int i;
*min = -1UL;
*max_low = *max_high = 0;
for_each_bank(i, mi) {
struct membank *bank = &mi->bank[i];
unsigned long start, end;
start = bank_pfn_start(bank);
end = bank_pfn_end(bank);
if (*min > start)
*min = start;
if (*max_high < end)
*max_high = end;
if (bank->highmem)
continue;
if (*max_low < end)
*max_low = end;
}
}
static void __init uc32_bootmem_init(unsigned long start_pfn,
unsigned long end_pfn)
{
struct memblock_region *reg;
unsigned int boot_pages;
phys_addr_t bitmap;
pg_data_t *pgdat;
/*
* Allocate the bootmem bitmap page. This must be in a region
* of memory which has already been mapped.
*/
boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
__pfn_to_phys(end_pfn));
/*
* Initialise the bootmem allocator, handing the
* memory banks over to bootmem.
*/
node_set_online(0);
pgdat = NODE_DATA(0);
init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
/* Free the lowmem regions from memblock into bootmem. */
for_each_memblock(memory, reg) {
unsigned long start = memblock_region_memory_base_pfn(reg);
unsigned long end = memblock_region_memory_end_pfn(reg);
if (end >= end_pfn)
end = end_pfn;
if (start >= end)
break;
free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
}
/* Reserve the lowmem memblock reserved regions in bootmem. */
for_each_memblock(reserved, reg) {
unsigned long start = memblock_region_reserved_base_pfn(reg);
unsigned long end = memblock_region_reserved_end_pfn(reg);
if (end >= end_pfn)
end = end_pfn;
if (start >= end)
break;
reserve_bootmem(__pfn_to_phys(start),
(end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
}
}
static void __init uc32_bootmem_free(unsigned long min, unsigned long max_low,
unsigned long max_high)
{
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
struct memblock_region *reg;
/*
* initialise the zones.
*/
memset(zone_size, 0, sizeof(zone_size));
/*
* The memory size has already been determined. If we need
* to do anything fancy with the allocation of this memory
* to the zones, now is the time to do it.
*/
zone_size[0] = max_low - min;
/*
* Calculate the size of the holes.
* holes = node_size - sum(bank_sizes)
*/
memcpy(zhole_size, zone_size, sizeof(zhole_size));
for_each_memblock(memory, reg) {
unsigned long start = memblock_region_memory_base_pfn(reg);
unsigned long end = memblock_region_memory_end_pfn(reg);
if (start < max_low) {
unsigned long low_end = min(end, max_low);
zhole_size[0] -= low_end - start;
}
}
/*
* Adjust the sizes according to any special requirements for
* this machine type.
*/
arch_adjust_zones(zone_size, zhole_size);
free_area_init_node(0, zone_size, min, zhole_size);
}
int pfn_valid(unsigned long pfn)
{
return memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
static void uc32_memory_present(void)
{
}
static int __init meminfo_cmp(const void *_a, const void *_b)
{
const struct membank *a = _a, *b = _b;
long cmp = bank_pfn_start(a) - bank_pfn_start(b);
return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}
void __init uc32_memblock_init(struct meminfo *mi)
{
int i;
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]),
meminfo_cmp, NULL);
for (i = 0; i < mi->nr_banks; i++)
memblock_add(mi->bank[i].start, mi->bank[i].size);
/* Register the kernel text, kernel data and initrd with memblock. */
memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
if (phys_initrd_size) {
memblock_reserve(phys_initrd_start, phys_initrd_size);
/* Now convert initrd to virtual addresses */
initrd_start = __phys_to_virt(phys_initrd_start);
initrd_end = initrd_start + phys_initrd_size;
}
#endif
uc32_mm_memblock_reserve();
memblock_allow_resize();
memblock_dump_all();
}
void __init bootmem_init(void)
{
unsigned long min, max_low, max_high;
max_low = max_high = 0;
find_limits(&min, &max_low, &max_high);
uc32_bootmem_init(min, max_low);
#ifdef CONFIG_SWIOTLB
swiotlb_init(1);
#endif
/*
* Sparsemem tries to allocate bootmem in memory_present(),
* so must be done after the fixed reservations
*/
uc32_memory_present();
/*
* sparse_init() needs the bootmem allocator up and running.
*/
sparse_init();
/*
* Now free the memory - free_area_init_node needs
* the sparse mem_map arrays initialized by sparse_init()
* for memmap_init_zone(), otherwise all PFNs are invalid.
*/
uc32_bootmem_free(min, max_low, max_high);
high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
/*
* This doesn't seem to be used by the Linux memory manager any
* more, but is used by ll_rw_block. If we can get rid of it, we
* also get rid of some of the stuff above as well.
*
* Note: max_low_pfn and max_pfn reflect the number of _pages_ in
* the system, not the maximum PFN.
*/
max_low_pfn = max_low - PHYS_PFN_OFFSET;
max_pfn = max_high - PHYS_PFN_OFFSET;
}
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
for (; pfn < end; pfn++) {
struct page *page = pfn_to_page(pfn);
ClearPageReserved(page);
init_page_count(page);
__free_page(page);
pages++;
}
if (size && s)
printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
return pages;
}
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
struct page *start_pg, *end_pg;
unsigned long pg, pgend;
/*
* Convert start_pfn/end_pfn to a struct page pointer.
*/
start_pg = pfn_to_page(start_pfn - 1) + 1;
end_pg = pfn_to_page(end_pfn);
/*
* Convert to physical addresses, and
* round start upwards and end downwards.
*/
pg = PAGE_ALIGN(__pa(start_pg));
pgend = __pa(end_pg) & PAGE_MASK;
/*
* If there are free pages between these,
* free the section of the memmap array.
*/
if (pg < pgend)
free_bootmem(pg, pgend - pg);
}
/*
* The mem_map array can get very big. Free the unused area of the memory map.
*/
static void __init free_unused_memmap(struct meminfo *mi)
{
unsigned long bank_start, prev_bank_end = 0;
unsigned int i;
/*
* This relies on each bank being in address order.
* The banks are sorted previously in bootmem_init().
*/
for_each_bank(i, mi) {
struct membank *bank = &mi->bank[i];
bank_start = bank_pfn_start(bank);
/*
* If we had a previous bank, and there is a space
* between the current bank and the previous, free it.
*/
if (prev_bank_end && prev_bank_end < bank_start)
free_memmap(prev_bank_end, bank_start);
/*
* Align up here since the VM subsystem insists that the
* memmap entries are valid from the bank end aligned to
* MAX_ORDER_NR_PAGES.
*/
prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
}
}
/*
* mem_init() marks the free areas in the mem_map and tells us how much
* memory is free. This is done after various parts of the system have
* claimed their memory after the kernel image.
*/
void __init mem_init(void)
{
unsigned long reserved_pages, free_pages;
struct memblock_region *reg;
int i;
max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
/* this will put all unused low memory onto the freelists */
free_unused_memmap(&meminfo);
totalram_pages += free_all_bootmem();
reserved_pages = free_pages = 0;
for_each_bank(i, &meminfo) {
struct membank *bank = &meminfo.bank[i];
unsigned int pfn1, pfn2;
struct page *page, *end;
pfn1 = bank_pfn_start(bank);
pfn2 = bank_pfn_end(bank);
page = pfn_to_page(pfn1);
end = pfn_to_page(pfn2 - 1) + 1;
do {
if (PageReserved(page))
reserved_pages++;
else if (!page_count(page))
free_pages++;
page++;
} while (page < end);
}
/*
* Since our memory may not be contiguous, calculate the
* real number of pages we have in this system
*/
printk(KERN_INFO "Memory:");
num_physpages = 0;
for_each_memblock(memory, reg) {
unsigned long pages = memblock_region_memory_end_pfn(reg) -
memblock_region_memory_base_pfn(reg);
num_physpages += pages;
printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
}
printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
nr_free_pages() << (PAGE_SHIFT-10),
free_pages << (PAGE_SHIFT-10),
reserved_pages << (PAGE_SHIFT-10),
totalhigh_pages << (PAGE_SHIFT-10));
printk(KERN_NOTICE "Virtual kernel memory layout:\n"
" vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
" vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
" lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
" modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
" .init : 0x%p" " - 0x%p" " (%4d kB)\n"
" .text : 0x%p" " - 0x%p" " (%4d kB)\n"
" .data : 0x%p" " - 0x%p" " (%4d kB)\n",
VECTORS_BASE, VECTORS_BASE + PAGE_SIZE,
DIV_ROUND_UP(PAGE_SIZE, SZ_1K),
VMALLOC_START, VMALLOC_END,
DIV_ROUND_UP((VMALLOC_END - VMALLOC_START), SZ_1M),
PAGE_OFFSET, (unsigned long)high_memory,
DIV_ROUND_UP(((unsigned long)high_memory - PAGE_OFFSET), SZ_1M),
MODULES_VADDR, MODULES_END,
DIV_ROUND_UP((MODULES_END - MODULES_VADDR), SZ_1M),
__init_begin, __init_end,
DIV_ROUND_UP((__init_end - __init_begin), SZ_1K),
_stext, _etext,
DIV_ROUND_UP((_etext - _stext), SZ_1K),
_sdata, _edata,
DIV_ROUND_UP((_edata - _sdata), SZ_1K));
BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
BUG_ON(TASK_SIZE > MODULES_VADDR);
if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
/*
* On a machine this small we won't get
* anywhere without overcommit, so turn
* it on by default.
*/
sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
}
}
void free_initmem(void)
{
totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
__phys_to_pfn(__pa(__init_end)),
"init");
}
#ifdef CONFIG_BLK_DEV_INITRD
static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
if (!keep_initrd)
totalram_pages += free_area(__phys_to_pfn(__pa(start)),
__phys_to_pfn(__pa(end)),
"initrd");
}
static int __init keepinitrd_setup(char *__unused)
{
keep_initrd = 1;
return 1;
}
__setup("keepinitrd", keepinitrd_setup);
#endif
| gpl-2.0 |
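uc32_bootmem_free() above derives zhole_size by subtracting the PFNs covered by each memory bank from the zone's full span. Below is a self-contained sketch of that hole computation; it is illustrative, and the bank layout is invented.

/* Standalone sketch (illustrative): compute the "hole" size of the low
 * zone the way uc32_bootmem_free() does, starting from the full PFN
 * span and subtracting every bank that overlaps it. */
#include <stdio.h>

struct bank { unsigned long start_pfn, end_pfn; };

static unsigned long zone_hole(unsigned long min, unsigned long max_low,
			       const struct bank *banks, int n)
{
	unsigned long hole = max_low - min;	/* single low zone assumed */
	for (int i = 0; i < n; i++) {
		if (banks[i].start_pfn >= max_low)
			continue;
		unsigned long end = banks[i].end_pfn < max_low ?
				    banks[i].end_pfn : max_low;
		hole -= end - banks[i].start_pfn;
	}
	return hole;
}

int main(void)
{
	struct bank banks[] = { {0x100, 0x400}, {0x800, 0xa00} };
	/* zone spans PFNs [0x100, 0xa00): 0x900 pages, banks cover 0x500 */
	printf("hole pages: 0x%lx\n", zone_hole(0x100, 0xa00, banks, 2));
	return 0;
}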