repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
puppeh/gcc-6502 | libbacktrace/sort.c | 89 | 3182 | /* sort.c -- Sort without allocating memory
Copyright (C) 2012-2015 Free Software Foundation, Inc.
Written by Ian Lance Taylor, Google.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
(1) Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
(2) Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
(3) The name of the author may not be used to
endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE. */
#include "config.h"
#include <stddef.h>
#include <sys/types.h>
#include "backtrace.h"
#include "internal.h"
/* The GNU glibc version of qsort allocates memory, which we must not
do if we are invoked by a signal handler. So provide our own
sort. */
/* Exchange the SIZE bytes at A with the SIZE bytes at B, one byte at
   a time, so that no temporary buffer is required.  */

static void
swap (char *a, char *b, size_t size)
{
  size_t i;

  for (i = 0; i < size; i++)
    {
      char tmp = a[i];
      a[i] = b[i];
      b[i] = tmp;
    }
}
/* Sort COUNT elements of SIZE bytes each starting at BASEARG, using
   COMPAR to compare elements, without allocating any memory.  This is
   a quicksort that recurses only into the smaller partition and loops
   (via goto) on the larger one, which bounds the stack depth to
   O(log COUNT) -- important because this may run from a signal
   handler, where glibc's allocating qsort must not be used.  */

void
backtrace_qsort (void *basearg, size_t count, size_t size,
                 int (*compar) (const void *, const void *))
{
  char *base = (char *) basearg;
  size_t i;
  size_t mid;

 tail_recurse:
  if (count < 2)
    return;

  /* The symbol table and DWARF tables, which is all we use this
     routine for, tend to be roughly sorted.  Pick the middle element
     in the array as our pivot point, so that we are more likely to
     cut the array in half for each recursion step.  */
  swap (base, base + (count / 2) * size, size);

  /* Partition: after the loop, elements at indices 1..mid compare
     less than the pivot (which sits at index 0); the rest do not.  */
  mid = 0;
  for (i = 1; i < count; i++)
    {
      if ((*compar) (base, base + i * size) > 0)
        {
          ++mid;
          if (i != mid)
            swap (base + mid * size, base + i * size, size);
        }
    }

  /* Move the pivot into its final sorted position.  */
  if (mid > 0)
    swap (base, base + mid * size, size);

  /* Recurse with the smaller array, loop with the larger one.  That
     ensures that our maximum stack depth is log count.  */
  if (2 * mid < count)
    {
      backtrace_qsort (base, mid, size, compar);
      base += (mid + 1) * size;
      count -= mid + 1;
      goto tail_recurse;
    }
  else
    {
      backtrace_qsort (base + (mid + 1) * size, count - (mid + 1),
                       size, compar);
      count = mid;
      goto tail_recurse;
    }
}
| gpl-2.0 |
ZUK2/android_kernel_zuk_z2_plus | drivers/iio/light/adjd_s311.c | 1625 | 8008 | /*
* adjd_s311.c - Support for ADJD-S311-CR999 digital color sensor
*
* Copyright (C) 2012 Peter Meerwald <pmeerw@pmeerw.net>
*
* This file is subject to the terms and conditions of version 2 of
* the GNU General Public License. See the file COPYING in the main
* directory of this archive for more details.
*
* driver for ADJD-S311-CR999 digital color sensor (10-bit channels for
* red, green, blue, clear); 7-bit I2C slave address 0x74
*
* limitations: no calibration, no offset mode, no sleep mode
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/buffer.h>
#include <linux/iio/triggered_buffer.h>
#define ADJD_S311_DRV_NAME "adjd_s311"
#define ADJD_S311_CTRL 0x00
#define ADJD_S311_CONFIG 0x01
#define ADJD_S311_CAP_RED 0x06
#define ADJD_S311_CAP_GREEN 0x07
#define ADJD_S311_CAP_BLUE 0x08
#define ADJD_S311_CAP_CLEAR 0x09
#define ADJD_S311_INT_RED 0x0a
#define ADJD_S311_INT_GREEN 0x0c
#define ADJD_S311_INT_BLUE 0x0e
#define ADJD_S311_INT_CLEAR 0x10
#define ADJD_S311_DATA_RED 0x40
#define ADJD_S311_DATA_GREEN 0x42
#define ADJD_S311_DATA_BLUE 0x44
#define ADJD_S311_DATA_CLEAR 0x46
#define ADJD_S311_OFFSET_RED 0x48
#define ADJD_S311_OFFSET_GREEN 0x49
#define ADJD_S311_OFFSET_BLUE 0x4a
#define ADJD_S311_OFFSET_CLEAR 0x4b
#define ADJD_S311_CTRL_GOFS 0x02
#define ADJD_S311_CTRL_GSSR 0x01
#define ADJD_S311_CAP_MASK 0x0f
#define ADJD_S311_INT_MASK 0x0fff
#define ADJD_S311_DATA_MASK 0x03ff
/* Per-device driver state. */
struct adjd_s311_data {
	struct i2c_client *client;
	/* scan demux buffer; (re)allocated in adjd_s311_update_scan_mode() */
	u16 *buffer;
};
enum adjd_s311_channel_idx {
IDX_RED, IDX_GREEN, IDX_BLUE, IDX_CLEAR
};
#define ADJD_S311_DATA_REG(chan) (ADJD_S311_DATA_RED + (chan) * 2)
#define ADJD_S311_INT_REG(chan) (ADJD_S311_INT_RED + (chan) * 2)
#define ADJD_S311_CAP_REG(chan) (ADJD_S311_CAP_RED + (chan))
/*
 * Start a sensor measurement and wait until the result is ready.
 *
 * Sets the GSSR (start sensing) bit and then polls the control
 * register until the hardware clears it, sleeping 20 ms between
 * reads and giving up after 10 attempts.
 *
 * Returns 0 on success or a negative errno (I2C failure, or -EIO on
 * timeout).
 */
static int adjd_s311_req_data(struct iio_dev *indio_dev)
{
	struct adjd_s311_data *data = iio_priv(indio_dev);
	int tries;
	int ret;

	ret = i2c_smbus_write_byte_data(data->client, ADJD_S311_CTRL,
		ADJD_S311_CTRL_GSSR);
	if (ret < 0)
		return ret;

	for (tries = 0; tries < 10; tries++) {
		ret = i2c_smbus_read_byte_data(data->client, ADJD_S311_CTRL);
		if (ret < 0)
			return ret;
		if (!(ret & ADJD_S311_CTRL_GSSR))
			return 0;
		msleep(20);
	}

	dev_err(&data->client->dev,
		"adjd_s311_req_data() failed, data not ready\n");
	return -EIO;
}
/*
 * Trigger a measurement, then read one 10-bit data register.
 *
 * @reg: an ADJD_S311_DATA_* register address
 * @val: filled with the masked 10-bit reading on success
 *
 * Returns 0 on success or a negative errno.
 */
static int adjd_s311_read_data(struct iio_dev *indio_dev, u8 reg, int *val)
{
	struct adjd_s311_data *data = iio_priv(indio_dev);
	int ret;

	ret = adjd_s311_req_data(indio_dev);
	if (ret < 0)
		return ret;

	ret = i2c_smbus_read_word_data(data->client, reg);
	if (ret < 0)
		return ret;

	/* only the low 10 bits carry sensor data */
	*val = ret & ADJD_S311_DATA_MASK;

	return 0;
}
/*
 * Triggered-buffer capture path: start a measurement, read each channel
 * enabled in the active scan mask into data->buffer, and push the
 * packed sample set plus timestamp to the IIO buffers.
 *
 * Always returns IRQ_HANDLED; on an I2C failure the partially filled
 * sample is simply dropped.
 */
static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct adjd_s311_data *data = iio_priv(indio_dev);
	s64 time_ns = iio_get_time_ns();
	int i, j = 0;

	int ret = adjd_s311_req_data(indio_dev);
	if (ret < 0)
		goto done;

	for_each_set_bit(i, indio_dev->active_scan_mask,
		indio_dev->masklength) {
		ret = i2c_smbus_read_word_data(data->client,
			ADJD_S311_DATA_REG(i));
		if (ret < 0)
			goto done;

		/* pack enabled channels contiguously, 10 valid bits each */
		data->buffer[j++] = ret & ADJD_S311_DATA_MASK;
	}

	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, time_ns);

done:
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}
#define ADJD_S311_CHANNEL(_color, _scan_idx) { \
.type = IIO_INTENSITY, \
.modified = 1, \
.address = (IDX_##_color), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_HARDWAREGAIN) | \
BIT(IIO_CHAN_INFO_INT_TIME), \
.channel2 = (IIO_MOD_LIGHT_##_color), \
.scan_index = (_scan_idx), \
.scan_type = { \
.sign = 'u', \
.realbits = 10, \
.storagebits = 16, \
.endianness = IIO_CPU, \
}, \
}
static const struct iio_chan_spec adjd_s311_channels[] = {
ADJD_S311_CHANNEL(RED, 0),
ADJD_S311_CHANNEL(GREEN, 1),
ADJD_S311_CHANNEL(BLUE, 2),
ADJD_S311_CHANNEL(CLEAR, 3),
IIO_CHAN_SOFT_TIMESTAMP(4),
};
/*
 * Direct-mode reads.
 *
 * IIO_CHAN_INFO_RAW: kick a measurement and return the channel's
 *   10-bit intensity value.
 * IIO_CHAN_INFO_HARDWAREGAIN: return the channel's 4-bit sense
 *   capacitor setting.
 * IIO_CHAN_INFO_INT_TIME: return the channel's 12-bit integration
 *   timer value, reported as 0.<val2> in micro units (approximate,
 *   see comment below).
 */
static int adjd_s311_read_raw(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val, int *val2, long mask)
{
	struct adjd_s311_data *data = iio_priv(indio_dev);
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		ret = adjd_s311_read_data(indio_dev,
			ADJD_S311_DATA_REG(chan->address), val);
		if (ret < 0)
			return ret;
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_HARDWAREGAIN:
		ret = i2c_smbus_read_byte_data(data->client,
			ADJD_S311_CAP_REG(chan->address));
		if (ret < 0)
			return ret;
		*val = ret & ADJD_S311_CAP_MASK;
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_INT_TIME:
		ret = i2c_smbus_read_word_data(data->client,
			ADJD_S311_INT_REG(chan->address));
		if (ret < 0)
			return ret;
		*val = 0;
		/*
		 * not documented, based on measurement:
		 * 4095 LSBs correspond to roughly 4 ms
		 */
		*val2 = ret & ADJD_S311_INT_MASK;
		return IIO_VAL_INT_PLUS_MICRO;
	}
	return -EINVAL;
}
/*
 * Write a per-channel setting.
 *
 * IIO_CHAN_INFO_HARDWAREGAIN: 4-bit sense capacitor value.
 * IIO_CHAN_INFO_INT_TIME: 12-bit integration timer, passed in val2
 *   (micro units); val must be 0.
 *
 * Returns 0 on success, -EINVAL for out-of-range values or an
 * unsupported mask, or a negative I2C error code.
 */
static int adjd_s311_write_raw(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int val, int val2, long mask)
{
	struct adjd_s311_data *data = iio_priv(indio_dev);

	if (mask == IIO_CHAN_INFO_HARDWAREGAIN) {
		if (val < 0 || val > ADJD_S311_CAP_MASK)
			return -EINVAL;

		return i2c_smbus_write_byte_data(data->client,
			ADJD_S311_CAP_REG(chan->address), val);
	}

	if (mask == IIO_CHAN_INFO_INT_TIME) {
		if (val != 0 || val2 < 0 || val2 > ADJD_S311_INT_MASK)
			return -EINVAL;

		return i2c_smbus_write_word_data(data->client,
			ADJD_S311_INT_REG(chan->address), val2);
	}

	return -EINVAL;
}
static int adjd_s311_update_scan_mode(struct iio_dev *indio_dev,
const unsigned long *scan_mask)
{
struct adjd_s311_data *data = iio_priv(indio_dev);
kfree(data->buffer);
data->buffer = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
if (data->buffer == NULL)
return -ENOMEM;
return 0;
}
static const struct iio_info adjd_s311_info = {
.read_raw = adjd_s311_read_raw,
.write_raw = adjd_s311_write_raw,
.update_scan_mode = adjd_s311_update_scan_mode,
.driver_module = THIS_MODULE,
};
/*
 * Probe: allocate the (devm-managed) IIO device, describe the four
 * color channels, set up the triggered buffer and register with the
 * IIO core.  Returns 0 or a negative errno.
 */
static int adjd_s311_probe(struct i2c_client *client,
			   const struct i2c_device_id *id)
{
	struct adjd_s311_data *data;
	struct iio_dev *indio_dev;
	int err;

	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
	if (indio_dev == NULL)
		return -ENOMEM;

	data = iio_priv(indio_dev);
	i2c_set_clientdata(client, indio_dev);
	data->client = client;

	indio_dev->dev.parent = &client->dev;
	indio_dev->info = &adjd_s311_info;
	indio_dev->name = ADJD_S311_DRV_NAME;
	indio_dev->channels = adjd_s311_channels;
	indio_dev->num_channels = ARRAY_SIZE(adjd_s311_channels);
	indio_dev->modes = INDIO_DIRECT_MODE;

	err = iio_triggered_buffer_setup(indio_dev, NULL,
		adjd_s311_trigger_handler, NULL);
	if (err < 0)
		return err;

	err = iio_device_register(indio_dev);
	if (err)
		goto exit_unreg_buffer;

	dev_info(&client->dev, "ADJD-S311 color sensor registered\n");

	return 0;

exit_unreg_buffer:
	iio_triggered_buffer_cleanup(indio_dev);
	return err;
}
/*
 * Tear down in reverse order of probe: unregister the IIO device,
 * release the triggered-buffer resources, then free the demux buffer.
 * (indio_dev itself is devm-allocated and released automatically.)
 */
static int adjd_s311_remove(struct i2c_client *client)
{
	struct iio_dev *indio_dev = i2c_get_clientdata(client);
	struct adjd_s311_data *data = iio_priv(indio_dev);

	iio_device_unregister(indio_dev);
	iio_triggered_buffer_cleanup(indio_dev);
	kfree(data->buffer);

	return 0;
}
static const struct i2c_device_id adjd_s311_id[] = {
{ "adjd_s311", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adjd_s311_id);
static struct i2c_driver adjd_s311_driver = {
.driver = {
.name = ADJD_S311_DRV_NAME,
},
.probe = adjd_s311_probe,
.remove = adjd_s311_remove,
.id_table = adjd_s311_id,
};
module_i2c_driver(adjd_s311_driver);
MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
MODULE_DESCRIPTION("ADJD-S311 color sensor");
MODULE_LICENSE("GPL");
| gpl-2.0 |
doungni/linux | drivers/input/input-polldev.c | 1881 | 9516 | /*
* Generic implementation of a polled input device
* Copyright (c) 2007 Dmitry Torokhov
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/input-polldev.h>
MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>");
MODULE_DESCRIPTION("Generic implementation of a polled input device");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");
/*
 * Schedule the next poll on the freezable system workqueue.  Delays
 * of a second or more are rounded so that wakeups from many polled
 * devices can be batched.
 */
static void input_polldev_queue_work(struct input_polled_dev *dev)
{
	unsigned long period = msecs_to_jiffies(dev->poll_interval);

	if (period >= HZ)
		period = round_jiffies_relative(period);

	queue_delayed_work(system_freezable_wq, &dev->work, period);
}
/* Delayed-work handler: take one reading, then reschedule ourselves. */
static void input_polled_device_work(struct work_struct *work)
{
	struct input_polled_dev *dev =
		container_of(work, struct input_polled_dev, work.work);

	dev->poll(dev);
	input_polldev_queue_work(dev);
}
/*
 * input_dev->open handler: invoke the driver's optional open callback
 * and, if polling is enabled, take an initial reading and start the
 * periodic poll work.
 */
static int input_open_polled_device(struct input_dev *input)
{
	struct input_polled_dev *polldev = input_get_drvdata(input);

	if (polldev->open)
		polldev->open(polldev);

	/* Only start polling if polling is enabled */
	if (polldev->poll_interval > 0) {
		polldev->poll(polldev);
		input_polldev_queue_work(polldev);
	}

	return 0;
}
/*
 * input_dev->close handler: stop polling (waiting for any in-flight
 * poll to finish) before invoking the driver's optional close callback.
 */
static void input_close_polled_device(struct input_dev *input)
{
	struct input_polled_dev *dev = input_get_drvdata(input);

	cancel_delayed_work_sync(&dev->work);

	if (dev->close)
		dev->close(dev);
}
/* SYSFS interface */
/* sysfs "poll" show handler: report the current interval in msecs. */
static ssize_t input_polldev_get_poll(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct input_polled_dev *polldev = dev_get_drvdata(dev);
	int interval = polldev->poll_interval;

	return sprintf(buf, "%d\n", interval);
}
/*
 * sysfs "poll" store handler: change the poll interval at runtime.
 * Rejects values outside the driver-advertised [min, max] range and
 * restarts (or stops, if the interval is 0) the poll work when the
 * device is currently open.
 */
static ssize_t input_polldev_set_poll(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct input_polled_dev *polldev = dev_get_drvdata(dev);
	struct input_dev *input = polldev->input;
	unsigned int interval;
	int err;

	err = kstrtouint(buf, 0, &interval);
	if (err)
		return err;

	if (interval < polldev->poll_interval_min)
		return -EINVAL;

	if (interval > polldev->poll_interval_max)
		return -EINVAL;

	/* input->mutex serializes against open/close toggling users */
	mutex_lock(&input->mutex);
	polldev->poll_interval = interval;
	if (input->users) {
		/* device is open: apply the new period immediately */
		cancel_delayed_work_sync(&polldev->work);
		if (polldev->poll_interval > 0)
			input_polldev_queue_work(polldev);
	}
	mutex_unlock(&input->mutex);

	return count;
}
/* "poll" is root-writable so the interval can be tuned at runtime. */
static DEVICE_ATTR(poll, S_IRUGO | S_IWUSR, input_polldev_get_poll,
					input_polldev_set_poll);

/* sysfs "max" show handler: upper bound for the poll interval. */
static ssize_t input_polldev_get_max(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct input_polled_dev *polldev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", polldev->poll_interval_max);
}

static DEVICE_ATTR(max, S_IRUGO, input_polldev_get_max, NULL);

/* sysfs "min" show handler: lower bound for the poll interval. */
static ssize_t input_polldev_get_min(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct input_polled_dev *polldev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", polldev->poll_interval_min);
}

static DEVICE_ATTR(min, S_IRUGO, input_polldev_get_min, NULL);

/* Attributes exposed in the input device's sysfs directory. */
static struct attribute *sysfs_attrs[] = {
	&dev_attr_poll.attr,
	&dev_attr_max.attr,
	&dev_attr_min.attr,
	NULL
};

static struct attribute_group input_polldev_attribute_group = {
	.attrs = sysfs_attrs
};

static const struct attribute_group *input_polldev_attribute_groups[] = {
	&input_polldev_attribute_group,
	NULL
};
/**
* input_allocate_polled_device - allocate memory for polled device
*
* The function allocates memory for a polled device and also
* for an input device associated with this polled device.
*/
struct input_polled_dev *input_allocate_polled_device(void)
{
	struct input_polled_dev *polldev;

	/* zeroed so optional callbacks default to "not set" */
	polldev = kzalloc(sizeof(*polldev), GFP_KERNEL);
	if (!polldev)
		return NULL;

	polldev->input = input_allocate_device();
	if (!polldev->input) {
		kfree(polldev);
		return NULL;
	}

	return polldev;
}
EXPORT_SYMBOL(input_allocate_polled_device);
/* devres node tracking a managed polled device (see devm_* below). */
struct input_polled_devres {
	struct input_polled_dev *polldev;
};

/* devres match callback: does this resource wrap @data? */
static int devm_input_polldev_match(struct device *dev, void *res, void *data)
{
	struct input_polled_devres *devres = res;

	return devres->polldev == data;
}

/*
 * devres release action: drop the extra input-device reference and
 * free the polled device.  Runs after devm_input_polldev_unregister().
 */
static void devm_input_polldev_release(struct device *dev, void *res)
{
	struct input_polled_devres *devres = res;
	struct input_polled_dev *polldev = devres->polldev;

	dev_dbg(dev, "%s: dropping reference/freeing %s\n",
		__func__, dev_name(&polldev->input->dev));

	input_put_device(polldev->input);
	kfree(polldev);
}

/* devres action added by input_register_polled_device() to undo it. */
static void devm_input_polldev_unregister(struct device *dev, void *res)
{
	struct input_polled_devres *devres = res;
	struct input_polled_dev *polldev = devres->polldev;

	dev_dbg(dev, "%s: unregistering device %s\n",
		__func__, dev_name(&polldev->input->dev));
	input_unregister_device(polldev->input);

	/*
	 * Note that we are still holding extra reference to the input
	 * device so it will stick around until devm_input_polldev_release()
	 * is called.
	 */
}
/**
* devm_input_allocate_polled_device - allocate managed polled device
* @dev: device owning the polled device being created
*
* Returns prepared &struct input_polled_dev or %NULL.
*
* Managed polled input devices do not need to be explicitly unregistered
* or freed as it will be done automatically when owner device unbinds
* from * its driver (or binding fails). Once such managed polled device
* is allocated, it is ready to be set up and registered in the same
* fashion as regular polled input devices (using
* input_register_polled_device() function).
*
* If you want to manually unregister and free such managed polled devices,
* it can be still done by calling input_unregister_polled_device() and
* input_free_polled_device(), although it is rarely needed.
*
* NOTE: the owner device is set up as parent of input device and users
* should not override it.
*/
struct input_polled_dev *devm_input_allocate_polled_device(struct device *dev)
{
	struct input_polled_dev *polldev;
	struct input_polled_devres *devres;

	/* allocate the devres node first so a failure here leaks nothing */
	devres = devres_alloc(devm_input_polldev_release, sizeof(*devres),
			      GFP_KERNEL);
	if (!devres)
		return NULL;

	polldev = input_allocate_polled_device();
	if (!polldev) {
		devres_free(devres);
		return NULL;
	}

	/* owner device becomes the input device's parent */
	polldev->input->dev.parent = dev;
	polldev->devres_managed = true;

	devres->polldev = polldev;
	devres_add(dev, devres);

	return polldev;
}
EXPORT_SYMBOL(devm_input_allocate_polled_device);
/**
* input_free_polled_device - free memory allocated for polled device
* @dev: device to free
*
* The function frees memory allocated for polling device and drops
* reference to the associated input device.
*/
void input_free_polled_device(struct input_polled_dev *dev)
{
	if (dev) {
		/*
		 * Managed devices: remove the pending devres release
		 * action first, or it would free the device a second
		 * time at devres teardown.
		 */
		if (dev->devres_managed)
			WARN_ON(devres_destroy(dev->input->dev.parent,
						devm_input_polldev_release,
						devm_input_polldev_match,
						dev));
		input_put_device(dev->input);
		kfree(dev);
	}
}
EXPORT_SYMBOL(input_free_polled_device);
/**
* input_register_polled_device - register polled device
* @dev: device to register
*
* The function registers previously initialized polled input device
* with input layer. The device should be allocated with call to
* input_allocate_polled_device(). Callers should also set up poll()
* method and set up capabilities (id, name, phys, bits) of the
* corresponding input_dev structure.
*/
int input_register_polled_device(struct input_polled_dev *dev)
{
	struct input_polled_devres *devres = NULL;
	struct input_dev *input = dev->input;
	int error;

	/*
	 * For managed devices prepare the unregister action up front so
	 * a later allocation failure cannot strand a registered device.
	 */
	if (dev->devres_managed) {
		devres = devres_alloc(devm_input_polldev_unregister,
				      sizeof(*devres), GFP_KERNEL);
		if (!devres)
			return -ENOMEM;

		devres->polldev = dev;
	}

	input_set_drvdata(input, dev);
	INIT_DELAYED_WORK(&dev->work, input_polled_device_work);

	/* default interval: 500 ms; max defaults to the interval itself */
	if (!dev->poll_interval)
		dev->poll_interval = 500;
	if (!dev->poll_interval_max)
		dev->poll_interval_max = dev->poll_interval;

	input->open = input_open_polled_device;
	input->close = input_close_polled_device;

	input->dev.groups = input_polldev_attribute_groups;

	error = input_register_device(input);
	if (error) {
		devres_free(devres);
		return error;
	}

	/*
	 * Take extra reference to the underlying input device so
	 * that it survives call to input_unregister_polled_device()
	 * and is deleted only after input_free_polled_device()
	 * has been invoked. This is needed to ease task of freeing
	 * sparse keymaps.
	 */
	input_get_device(input);

	if (dev->devres_managed) {
		dev_dbg(input->dev.parent, "%s: registering %s with devres.\n",
			__func__, dev_name(&input->dev));
		devres_add(input->dev.parent, devres);
	}

	return 0;
}
EXPORT_SYMBOL(input_register_polled_device);
/**
* input_unregister_polled_device - unregister polled device
* @dev: device to unregister
*
* The function unregisters previously registered polled input
* device from input layer. Polling is stopped and device is
* ready to be freed with call to input_free_polled_device().
*/
void input_unregister_polled_device(struct input_polled_dev *dev)
{
	/*
	 * Managed devices: remove the pending devres unregister action
	 * so it does not run a second time at devres teardown.
	 */
	if (dev->devres_managed)
		WARN_ON(devres_destroy(dev->input->dev.parent,
					devm_input_polldev_unregister,
					devm_input_polldev_match,
					dev));

	input_unregister_device(dev->input);
}
EXPORT_SYMBOL(input_unregister_polled_device);
| gpl-2.0 |
estiko/android_kernel_cyanogen_msm8916 | arch/s390/kernel/irq.c | 1881 | 10161 | /*
* Copyright IBM Corp. 2004, 2011
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Holger Smolinski <Holger.Smolinski@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
*
* This file contains interrupt related functions.
*/
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/irq_regs.h>
#include <asm/cputime.h>
#include <asm/lowcore.h>
#include <asm/irq.h>
#include "entry.h"
DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
EXPORT_PER_CPU_SYMBOL_GPL(irq_stat);
struct irq_class {
char *name;
char *desc;
};
/*
* The list of "main" irq classes on s390. This is the list of interrupts
* that appear both in /proc/stat ("intr" line) and /proc/interrupts.
* Historically only external and I/O interrupts have been part of /proc/stat.
* We can't add the split external and I/O sub classes since the first field
* in the "intr" line in /proc/stat is supposed to be the sum of all other
* fields.
* Since the external and I/O interrupt fields are already sums we would end
* up with having a sum which accounts each interrupt twice.
*/
static const struct irq_class irqclass_main_desc[NR_IRQS] = {
[EXTERNAL_INTERRUPT] = {.name = "EXT"},
[IO_INTERRUPT] = {.name = "I/O"}
};
/*
* The list of split external and I/O interrupts that appear only in
* /proc/interrupts.
* In addition this list contains non external / I/O events like NMIs.
*/
static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
[IRQEXT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"},
[IRQEXT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"},
[IRQEXT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"},
[IRQEXT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"},
[IRQEXT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"},
[IRQEXT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
[IRQEXT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"},
[IRQEXT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"},
[IRQEXT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"},
[IRQEXT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"},
[IRQEXT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
[IRQEXT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
[IRQEXT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
[IRQIO_CIO] = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
[IRQIO_QAI] = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
[IRQIO_DAS] = {.name = "DAS", .desc = "[I/O] DASD"},
[IRQIO_C15] = {.name = "C15", .desc = "[I/O] 3215"},
[IRQIO_C70] = {.name = "C70", .desc = "[I/O] 3270"},
[IRQIO_TAP] = {.name = "TAP", .desc = "[I/O] Tape"},
[IRQIO_VMR] = {.name = "VMR", .desc = "[I/O] Unit Record Devices"},
[IRQIO_LCS] = {.name = "LCS", .desc = "[I/O] LCS"},
[IRQIO_CLW] = {.name = "CLW", .desc = "[I/O] CLAW"},
[IRQIO_CTC] = {.name = "CTC", .desc = "[I/O] CTC"},
[IRQIO_APB] = {.name = "APB", .desc = "[I/O] AP Bus"},
[IRQIO_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"},
[IRQIO_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
[IRQIO_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
[IRQIO_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
[IRQIO_VIR] = {.name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
[NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"},
[CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"},
};
/*
* show_interrupts is needed by /proc/interrupts.
*/
/*
 * seq_file show handler backing /proc/interrupts.  Position 0 emits
 * the per-CPU header; positions below NR_IRQS emit one summed "main"
 * class per call; once past the main classes, every split external/IO
 * sub class is emitted in a single call.  get_online_cpus() keeps the
 * set of online CPUs stable while the per-CPU counters are printed.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int irq = *(loff_t *) v;
	int cpu;

	get_online_cpus();
	if (irq == 0) {
		seq_puts(p, " ");
		for_each_online_cpu(cpu)
			seq_printf(p, "CPU%d ", cpu);
		seq_putc(p, '\n');
	}
	if (irq < NR_IRQS) {
		seq_printf(p, "%s: ", irqclass_main_desc[irq].name);
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[irq]);
		seq_putc(p, '\n');
		goto skip_arch_irqs;
	}
	/* irq >= NR_IRQS: dump all per-arch sub classes at once */
	for (irq = 0; irq < NR_ARCH_IRQS; irq++) {
		seq_printf(p, "%s: ", irqclass_sub_desc[irq].name);
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).irqs[irq]);
		if (irqclass_sub_desc[irq].desc)
			seq_printf(p, " %s", irqclass_sub_desc[irq].desc);
		seq_putc(p, '\n');
	}
skip_arch_irqs:
	put_online_cpus();
	return 0;
}
/*
* Switch to the asynchronous interrupt stack for softirq execution.
*/
/*
 * Run pending softirqs with interrupts disabled, switching to the
 * per-CPU asynchronous interrupt stack first (unless already on it)
 * so softirq handlers do not run on a deep process stack.
 */
asmlinkage void do_softirq(void)
{
	unsigned long flags, old, new;

	/* softirqs raised in interrupt context are handled on irq exit */
	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		/* Get current stack pointer. */
		asm volatile("la %0,0(15)" : "=a" (old));
		/* Check against async. stack address range. */
		new = S390_lowcore.async_stack;
		if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
			/* Need to switch to the async. stack. */
			new -= STACK_FRAME_OVERHEAD;
			((struct stack_frame *) new)->back_chain = old;

			/* point r15 at the async stack, call
			   __do_softirq via r14, then restore r15 */
			asm volatile(" la 15,0(%0)\n"
				     " basr 14,%2\n"
				     " la 15,0(%1)\n"
				     : : "a" (new), "a" (old),
					 "a" (__do_softirq)
				     : "0", "1", "2", "3", "4", "5", "14",
				       "cc", "memory" );
		} else {
			/* We are already on the async stack. */
			__do_softirq();
		}
	}

	local_irq_restore(flags);
}
#ifdef CONFIG_PROC_FS
/* Create /proc/irq and the prof_cpu_mask entry used by profiling. */
void init_irq_proc(void)
{
	if (proc_mkdir("irq", NULL))
		create_prof_cpu_mask();
}
#endif
/*
* ext_int_hash[index] is the list head for all external interrupts that hash
* to this index.
*/
static struct list_head ext_int_hash[256];

/* One registered external-interrupt handler, hashed by its code. */
struct ext_int_info {
	ext_int_handler_t handler;
	u16 code;
	struct list_head entry;
	struct rcu_head rcu;	/* for deferred kfree_rcu() on unregister */
};

/* ext_int_hash_lock protects the handler lists for external interrupts */
DEFINE_SPINLOCK(ext_int_hash_lock);

static void __init init_external_interrupts(void)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
		INIT_LIST_HEAD(&ext_int_hash[idx]);
}

/* Fold a 16-bit interruption code into an ext_int_hash index. */
static inline int ext_hash(u16 code)
{
	return (code + (code >> 9)) & 0xff;
}
/*
 * Register HANDLER for external interruption code CODE.  Allocates
 * with GFP_ATOMIC so it can be called from non-sleeping context.
 * Readers (do_extint) walk the chains under RCU; writers serialize on
 * ext_int_hash_lock.  Returns 0 or -ENOMEM.
 */
int register_external_interrupt(u16 code, ext_int_handler_t handler)
{
	struct ext_int_info *p;
	unsigned long flags;
	int index;

	p = kmalloc(sizeof(*p), GFP_ATOMIC);
	if (!p)
		return -ENOMEM;
	p->code = code;
	p->handler = handler;
	index = ext_hash(code);

	spin_lock_irqsave(&ext_int_hash_lock, flags);
	list_add_rcu(&p->entry, &ext_int_hash[index]);
	spin_unlock_irqrestore(&ext_int_hash_lock, flags);
	return 0;
}
EXPORT_SYMBOL(register_external_interrupt);
/*
 * Remove a (code, handler) registration.  Freeing is deferred with
 * kfree_rcu() so concurrent RCU walkers in do_extint() remain safe.
 * Always returns 0, even if no matching entry was found.
 */
int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
{
	struct ext_int_info *p;
	unsigned long flags;
	int index = ext_hash(code);

	spin_lock_irqsave(&ext_int_hash_lock, flags);
	list_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
		if (p->code == code && p->handler == handler) {
			list_del_rcu(&p->entry);
			kfree_rcu(p, rcu);
		}
	}
	spin_unlock_irqrestore(&ext_int_hash_lock, flags);
	return 0;
}
EXPORT_SYMBOL(unregister_external_interrupt);
/*
 * First-level handler for external interruptions: serve an expired
 * clock comparator first, account the interrupt, then dispatch to
 * every handler registered for this interruption code (RCU-protected
 * hash chain walk).
 */
void __irq_entry do_extint(struct pt_regs *regs, struct ext_code ext_code,
			   unsigned int param32, unsigned long param64)
{
	struct pt_regs *old_regs;
	struct ext_int_info *p;
	int index;

	old_regs = set_irq_regs(regs);
	irq_enter();
	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) {
		/* Serve timer interrupts first. */
		clock_comparator_work();
	}
	kstat_incr_irqs_this_cpu(EXTERNAL_INTERRUPT, NULL);
	/* anything but code 0x1004 (presumably the CPU timer - confirm)
	 * ends the nohz idle delay period */
	if (ext_code.code != 0x1004)
		__get_cpu_var(s390_idle).nohz_delay = 1;

	index = ext_hash(ext_code.code);
	rcu_read_lock();
	list_for_each_entry_rcu(p, &ext_int_hash[index], entry)
		if (likely(p->code == ext_code.code))
			p->handler(ext_code, param32, param64);
	rcu_read_unlock();
	irq_exit();
	set_irq_regs(old_regs);
}
/* Early arch IRQ init: set up the external-interrupt hash lists. */
void __init init_IRQ(void)
{
	init_external_interrupts();
}
/* Refcounted enable/disable of control register 0 bit 9 (service
 * signal subclass mask): set on first user, cleared on last. */
static DEFINE_SPINLOCK(sc_irq_lock);
static int sc_irq_refcount;

void service_subclass_irq_register(void)
{
	spin_lock(&sc_irq_lock);
	if (!sc_irq_refcount)
		ctl_set_bit(0, 9);
	sc_irq_refcount++;
	spin_unlock(&sc_irq_lock);
}
EXPORT_SYMBOL(service_subclass_irq_register);

void service_subclass_irq_unregister(void)
{
	spin_lock(&sc_irq_lock);
	sc_irq_refcount--;
	/* last user gone: mask the subclass again */
	if (!sc_irq_refcount)
		ctl_clear_bit(0, 9);
	spin_unlock(&sc_irq_lock);
}
EXPORT_SYMBOL(service_subclass_irq_unregister);

/* Same refcounting scheme for control register 0 bit 5
 * (measurement alert subclass). */
static DEFINE_SPINLOCK(ma_subclass_lock);
static int ma_subclass_refcount;

void measurement_alert_subclass_register(void)
{
	spin_lock(&ma_subclass_lock);
	if (!ma_subclass_refcount)
		ctl_set_bit(0, 5);
	ma_subclass_refcount++;
	spin_unlock(&ma_subclass_lock);
}
EXPORT_SYMBOL(measurement_alert_subclass_register);

void measurement_alert_subclass_unregister(void)
{
	spin_lock(&ma_subclass_lock);
	ma_subclass_refcount--;
	if (!ma_subclass_refcount)
		ctl_clear_bit(0, 5);
	spin_unlock(&ma_subclass_lock);
}
EXPORT_SYMBOL(measurement_alert_subclass_unregister);
#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
	/*
	 * Not needed, the handler is protected by a lock and IRQs that occur
	 * after the handler is deleted are just NOPs.
	 */
}
EXPORT_SYMBOL_GPL(synchronize_irq);
#endif

#ifndef CONFIG_PCI

/* Only PCI devices have dynamically-defined IRQ handlers */

/* Without PCI there is nothing to attach a handler to: fail. */
int request_irq(unsigned int irq, irq_handler_t handler,
		unsigned long irqflags, const char *devname, void *dev_id)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(request_irq);

/* The following should never be reached without PCI: warn loudly. */
void free_irq(unsigned int irq, void *dev_id)
{
	WARN_ON(1);
}
EXPORT_SYMBOL_GPL(free_irq);

void enable_irq(unsigned int irq)
{
	WARN_ON(1);
}
EXPORT_SYMBOL_GPL(enable_irq);

void disable_irq(unsigned int irq)
{
	WARN_ON(1);
}
EXPORT_SYMBOL_GPL(disable_irq);

#endif /* !CONFIG_PCI */

void disable_irq_nosync(unsigned int irq)
{
	disable_irq(irq);
}
EXPORT_SYMBOL_GPL(disable_irq_nosync);

/* IRQ autoprobe stubs: return neutral values so common drivers that
 * attempt autoprobing still link and behave as "nothing found". */
unsigned long probe_irq_on(void)
{
	return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_on);

int probe_irq_off(unsigned long val)
{
	return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_off);

unsigned int probe_irq_mask(unsigned long val)
{
	return val;
}
EXPORT_SYMBOL_GPL(probe_irq_mask);
| gpl-2.0 |
kendling/android_kernel_google_dragon | net/wimax/op-state-get.c | 1881 | 2135 | /*
* Linux WiMAX
* Implement and export a method for getting a WiMAX device current state
*
* Copyright (C) 2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
*
* Based on previous WiMAX core work by:
* Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <net/wimax.h>
#include <net/genetlink.h>
#include <linux/wimax.h>
#include <linux/security.h>
#include "wimax-internal.h"
#define D_SUBMODULE op_state_get
#include "debug-levels.h"
/*
* Exporting to user space over generic netlink
*
* Parse the state get command from user space, return a combination
* value that describe the current state.
*
* No attributes.
*/
/*
 * Handle the WIMAX_GNL_OP_STATE_GET generic-netlink request: look up
 * the wimax_dev by the mandatory IFIDX attribute and return its
 * current state (or a negative errno) as the genl result.
 */
int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info)
{
	int result, ifindex;
	struct wimax_dev *wimax_dev;

	d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
	result = -ENODEV;
	if (info->attrs[WIMAX_GNL_STGET_IFIDX] == NULL) {
		pr_err("WIMAX_GNL_OP_STATE_GET: can't find IFIDX attribute\n");
		goto error_no_wimax_dev;
	}
	ifindex = nla_get_u32(info->attrs[WIMAX_GNL_STGET_IFIDX]);
	/* the lookup takes a reference on the underlying net_dev */
	wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
	if (wimax_dev == NULL)
		goto error_no_wimax_dev;
	/* Execute the operation and send the result back to user space */
	result = wimax_state_get(wimax_dev);
	/* drop the reference taken by wimax_dev_get_by_genl_info() */
	dev_put(wimax_dev->net_dev);
error_no_wimax_dev:
	d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
	return result;
}
| gpl-2.0 |
TeamSPR/kernel | sound/pci/hda/patch_ca0110.c | 2649 | 2995 | /*
* HD audio interface patch for Creative X-Fi CA0110-IBG chip
*
* Copyright (c) 2008 Takashi Iwai <tiwai@suse.de>
*
* This driver is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This driver is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <sound/core.h>
#include "hda_codec.h"
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
#include "hda_generic.h"
/* Codec callbacks: everything is delegated to the hda generic helpers. */
static const struct hda_codec_ops ca0110_patch_ops = {
.build_controls = snd_hda_gen_build_controls,
.build_pcms = snd_hda_gen_build_pcms,
.init = snd_hda_gen_init,
.free = snd_hda_gen_free,
.unsol_event = snd_hda_jack_unsol_event,
};
/*
 * Run the generic pin-default parser followed by the generic
 * auto-configuration parser on the codec's spec.
 * Returns 0 on success or the first negative error encountered.
 */
static int ca0110_parse_auto_config(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;
	int err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);

	if (err >= 0)
		err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
	return err < 0 ? err : 0;
}
/*
 * Codec probe: allocate the generic spec, apply CA0110 quirks (multi
 * capture volume, long bus delays) and run auto-configuration.
 * Returns 0 on success or a negative error; the spec is freed on failure.
 */
static int patch_ca0110(struct hda_codec *codec)
{
struct hda_gen_spec *spec;
int err;
spec = kzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
snd_hda_gen_spec_init(spec);
codec->spec = spec;
spec->multi_cap_vol = 1;
codec->bus->needs_damn_long_delay = 1;
err = ca0110_parse_auto_config(codec);
if (err < 0)
goto error;
codec->patch_ops = ca0110_patch_ops;
return 0;
error:
/* snd_hda_gen_free() also releases codec->spec allocated above */
snd_hda_gen_free(codec);
return err;
}
/*
* patch entries
*/
/* Codec-ID to patch-function table; all IDs share the same probe. */
static const struct hda_codec_preset snd_hda_preset_ca0110[] = {
{ .id = 0x1102000a, .name = "CA0110-IBG", .patch = patch_ca0110 },
{ .id = 0x1102000b, .name = "CA0110-IBG", .patch = patch_ca0110 },
{ .id = 0x1102000d, .name = "SB0880 X-Fi", .patch = patch_ca0110 },
{} /* terminator */
};
MODULE_ALIAS("snd-hda-codec-id:1102000a");
MODULE_ALIAS("snd-hda-codec-id:1102000b");
MODULE_ALIAS("snd-hda-codec-id:1102000d");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Creative CA0110-IBG HD-audio codec");
/* Registration handle tying the preset table to this module's lifetime. */
static struct hda_codec_preset_list ca0110_list = {
.preset = snd_hda_preset_ca0110,
.owner = THIS_MODULE,
};
/* Module init: register the CA0110 preset list with the HDA core. */
static int __init patch_ca0110_init(void)
{
return snd_hda_add_codec_preset(&ca0110_list);
}
/* Module exit: unregister the preset list added in patch_ca0110_init(). */
static void __exit patch_ca0110_exit(void)
{
snd_hda_delete_codec_preset(&ca0110_list);
}
module_init(patch_ca0110_init)
module_exit(patch_ca0110_exit)
| gpl-2.0 |
go2ev-devteam/linux-tk1 | drivers/video/mmp/hw/mmp_spi.c | 4697 | 4684 | /*
* linux/drivers/video/mmp/hw/mmp_spi.c
* using the spi in LCD controler for commands send
*
* Copyright (C) 2012 Marvell Technology Group Ltd.
* Authors: Guoqing Li <ligq@marvell.com>
* Lisa Du <cldu@marvell.com>
* Zhou Zhu <zzhu3@marvell.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
#include "mmp_ctrl.h"
/**
* spi_write - write command to the SPI port
* @data: can be 8/16/32-bit, MSB justified data to write.
* @len: data length.
*
* Wait bus transfer complete IRQ.
* The caller is expected to perform the necessary locking.
*
* Returns:
* %-ETIMEDOUT timeout occurred
* 0 success
*/
/*
 * Write one word to the LCD controller's SPI TX register and trigger the
 * transfer, busy-waiting (udelay steps) on the transfer-complete IRQ
 * status bit.
 *
 * Returns:
 *   0           success
 *   -EINVAL     unsupported spi->bits_per_word
 *   -ETIMEDOUT  controller never signalled completion
 *
 * Fix: the original fell through the default: case and started a transfer
 * with whatever stale data was in the TX register; now it bails out.
 */
static inline int lcd_spi_write(struct spi_device *spi, u32 data)
{
	int timeout = 100000, isr, ret = 0;
	u32 tmp;
	void *reg_base =
		*(void **)spi_master_get_devdata(spi->master);

	/* clear ISR */
	writel_relaxed(~SPI_IRQ_MASK, reg_base + SPU_IRQ_ISR);

	switch (spi->bits_per_word) {
	case 8:
		writel_relaxed((u8)data, reg_base + LCD_SPU_SPI_TXDATA);
		break;
	case 16:
		writel_relaxed((u16)data, reg_base + LCD_SPU_SPI_TXDATA);
		break;
	case 32:
		writel_relaxed((u32)data, reg_base + LCD_SPU_SPI_TXDATA);
		break;
	default:
		dev_err(&spi->dev, "Wrong spi bit length\n");
		return -EINVAL;
	}

	/* SPI start to send command */
	tmp = readl_relaxed(reg_base + LCD_SPU_SPI_CTRL);
	tmp &= ~CFG_SPI_START_MASK;
	tmp |= CFG_SPI_START(1);
	writel(tmp, reg_base + LCD_SPU_SPI_CTRL);

	/* poll the IRQ status for completion, ~100us per step */
	isr = readl_relaxed(reg_base + SPU_IRQ_ISR);
	while (!(isr & SPI_IRQ_ENA_MASK)) {
		udelay(100);
		isr = readl_relaxed(reg_base + SPU_IRQ_ISR);
		if (!--timeout) {
			ret = -ETIMEDOUT;
			dev_err(&spi->dev, "spi cmd send time out\n");
			break;
		}
	}

	/* deassert START and clear the IRQ status for the next transfer */
	tmp = readl_relaxed(reg_base + LCD_SPU_SPI_CTRL);
	tmp &= ~CFG_SPI_START_MASK;
	tmp |= CFG_SPI_START(0);
	writel_relaxed(tmp, reg_base + LCD_SPU_SPI_CTRL);
	writel_relaxed(~SPI_IRQ_MASK, reg_base + SPU_IRQ_ISR);
	return ret;
}
/*
 * spi_master->setup hook: program the LCD SPI control register (clock
 * divider, word length, 3/4-wire mode) and switch the IO pads into SPI
 * mode before the first transfer.  Always returns 0.
 */
static int lcd_spi_setup(struct spi_device *spi)
{
void *reg_base =
*(void **)spi_master_get_devdata(spi->master);
u32 tmp;
tmp = CFG_SCLKCNT(16) |
CFG_TXBITS(spi->bits_per_word) |
CFG_SPI_SEL(1) | CFG_SPI_ENA(1) |
CFG_SPI_3W4WB(1);
writel(tmp, reg_base + LCD_SPU_SPI_CTRL);
/*
 * After setting the mode the hardware needs some time to pull up the
 * SPI signals, or the waveform of the next SPI command is wrong,
 * especially on pxa910h; hence the udelay() below.
 */
tmp = readl_relaxed(reg_base + SPU_IOPAD_CONTROL);
if ((tmp & CFG_IOPADMODE_MASK) != IOPAD_DUMB18SPI)
writel_relaxed(IOPAD_DUMB18SPI |
(tmp & ~CFG_IOPADMODE_MASK),
reg_base + SPU_IOPAD_CONTROL);
udelay(20);
return 0;
}
/*
 * spi_master->transfer hook: synchronously push every transfer in the
 * message through lcd_spi_write(), one word at a time.
 *
 * Fix: errors from lcd_spi_write() (timeouts, bad word length) were
 * silently discarded and m->status forced to 0; the first error now
 * aborts the message and is reported through m->status.  The hook
 * itself still returns 0, as completion is signalled via m->complete.
 */
static int lcd_spi_one_transfer(struct spi_device *spi, struct spi_message *m)
{
	struct spi_transfer *t;
	int i, ret = 0;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		switch (spi->bits_per_word) {
		case 8:
			for (i = 0; i < t->len && !ret; i++)
				ret = lcd_spi_write(spi, ((u8 *)t->tx_buf)[i]);
			break;
		case 16:
			for (i = 0; i < t->len/2 && !ret; i++)
				ret = lcd_spi_write(spi, ((u16 *)t->tx_buf)[i]);
			break;
		case 32:
			for (i = 0; i < t->len/4 && !ret; i++)
				ret = lcd_spi_write(spi, ((u32 *)t->tx_buf)[i]);
			break;
		default:
			dev_err(&spi->dev, "Wrong spi bit length\n");
			ret = -EINVAL;
		}
		if (ret)
			break;
	}
	m->status = ret;
	if (m->complete)
		m->complete(m->context);
	return 0;
}
/*
 * Create and register a minimal SPI master driving the SPI port embedded
 * in the LCD controller.  The master's devdata is a single pointer to
 * the controller register base, consumed by the setup/transfer hooks.
 * Returns 0 on success or a negative error.
 */
int lcd_spi_register(struct mmphw_ctrl *ctrl)
{
struct spi_master *master;
void **p_regbase;
int err;
master = spi_alloc_master(ctrl->dev, sizeof(void *));
if (!master) {
dev_err(ctrl->dev, "unable to allocate SPI master\n");
return -ENOMEM;
}
p_regbase = spi_master_get_devdata(master);
*p_regbase = ctrl->reg_base;
/* set bus num to 5 to avoid conflict with other spi hosts */
master->bus_num = 5;
master->num_chipselect = 1;
master->setup = lcd_spi_setup;
master->transfer = lcd_spi_one_transfer;
err = spi_register_master(master);
if (err < 0) {
dev_err(ctrl->dev, "unable to register SPI master\n");
/* drop the reference taken by spi_alloc_master() */
spi_master_put(master);
return err;
}
dev_info(&master->dev, "registered\n");
return 0;
}
| gpl-2.0 |
mer-hybris-kis3/android_kernel_zte_msm8610 | arch/arm/mach-omap2/omap4-common.c | 4697 | 4749 | /*
* OMAP4 specific common source file.
*
* Copyright (C) 2010 Texas Instruments, Inc.
* Author:
* Santosh Shilimkar <santosh.shilimkar@ti.com>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/memblock.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/map.h>
#include <asm/memblock.h>
#include <plat/irqs.h>
#include <plat/sram.h>
#include <plat/omap-secure.h>
#include <mach/hardware.h>
#include <mach/omap-wakeupgen.h>
#include "common.h"
#include "omap4-sar-layout.h"
#include <linux/export.h>
#ifdef CONFIG_CACHE_L2X0
static void __iomem *l2cache_base;
#endif
static void __iomem *sar_ram_base;
#ifdef CONFIG_OMAP4_ERRATA_I688
/* Used to implement memory barrier on DRAM path */
#define OMAP4_DRAM_BARRIER_VA 0xfe600000
void __iomem *dram_sync, *sram_sync;
static phys_addr_t paddr;
static u32 size;
/*
 * Interconnect barrier for the i688 erratum: read+write-back through the
 * strongly-ordered DRAM and SRAM mappings set up in omap_barriers_init(),
 * then isb().  A no-op until those mappings exist.
 */
void omap_bus_sync(void)
{
if (dram_sync && sram_sync) {
writel_relaxed(readl_relaxed(dram_sync), dram_sync);
writel_relaxed(readl_relaxed(sram_sync), sram_sync);
isb();
}
}
EXPORT_SYMBOL(omap_bus_sync);
/* Steal one page physical memory for barrier implementation */
/* Steal one page (1MB-aligned) of physical memory for the DRAM barrier. */
int __init omap_barrier_reserve_memblock(void)
{
size = ALIGN(PAGE_SIZE, SZ_1M);
paddr = arm_memblock_steal(size, SZ_1M);
return 0;
}
/*
 * Map the stolen DRAM page strongly-ordered at OMAP4_DRAM_BARRIER_VA and
 * record the DRAM/SRAM virtual addresses used by omap_bus_sync().
 */
void __init omap_barriers_init(void)
{
struct map_desc dram_io_desc[1];
dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
dram_io_desc[0].pfn = __phys_to_pfn(paddr);
dram_io_desc[0].length = size;
dram_io_desc[0].type = MT_MEMORY_SO;
iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
dram_sync = (void __iomem *) dram_io_desc[0].virtual;
sram_sync = (void __iomem *) OMAP4_SRAM_VA;
pr_info("OMAP4: Map 0x%08llx to 0x%08lx for dram barrier\n",
(long long) paddr, dram_io_desc[0].virtual);
}
#else
/* No-op when the i688 barrier erratum workaround is not enabled. */
void __init omap_barriers_init(void)
{}
#endif
/*
 * Map the GIC distributor and CPU interface registers, initialize the
 * wakeup generator, then bring up the GIC.
 */
void __init gic_init_irq(void)
{
void __iomem *omap_irq_base;
void __iomem *gic_dist_base_addr;
/* Static mapping, never released */
gic_dist_base_addr = ioremap(OMAP44XX_GIC_DIST_BASE, SZ_4K);
BUG_ON(!gic_dist_base_addr);
/* Static mapping, never released */
omap_irq_base = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
BUG_ON(!omap_irq_base);
/* WakeupGen must be ready before the GIC starts routing interrupts */
omap_wakeupgen_init();
gic_init(0, 29, gic_dist_base_addr, omap_irq_base);
}
#ifdef CONFIG_CACHE_L2X0
/* Accessor for the static PL310 register mapping set up at early init. */
void __iomem *omap4_get_l2cache_base(void)
{
return l2cache_base;
}
/* outer_cache.disable override: the PL310 must be disabled via secure SMC. */
static void omap4_l2x0_disable(void)
{
/* Disable PL310 L2 Cache controller */
omap_smc1(0x102, 0x0);
}
/* outer_cache.set_debug override: debug register is secure-world only. */
static void omap4_l2x0_set_debug(unsigned long val)
{
/* Program PL310 L2 Cache controller debug register */
omap_smc1(0x100, val);
}
/*
 * Map and enable the PL310 L2 cache controller, building the auxiliary
 * control value (associativity, way size per silicon revision, prefetch
 * and early-BRESP on ES2.0+) and programming it through secure SMC calls.
 * Returns 0 on success, -ENODEV on non-OMAP4, -ENOMEM if ioremap fails.
 */
static int __init omap_l2_cache_init(void)
{
u32 aux_ctrl = 0;
/*
 * To avoid code running on other OMAPs in
 * multi-omap builds
 */
if (!cpu_is_omap44xx())
return -ENODEV;
/* Static mapping, never released */
l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
if (WARN_ON(!l2cache_base))
return -ENOMEM;
/*
 * 16-way associativity, parity disabled
 * Way size - 32KB (es1.0)
 * Way size - 64KB (es2.0 +)
 */
aux_ctrl = ((1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT) |
(0x1 << 25) |
(0x1 << L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT) |
(0x1 << L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT));
if (omap_rev() == OMAP4430_REV_ES1_0) {
aux_ctrl |= 0x2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT;
} else {
aux_ctrl |= ((0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
(1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) |
(1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) |
(1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) |
(1 << L2X0_AUX_CTRL_EARLY_BRESP_SHIFT));
}
/* aux_ctrl is written via secure service on ES2.0+ only */
if (omap_rev() != OMAP4430_REV_ES1_0)
omap_smc1(0x109, aux_ctrl);
/* Enable PL310 L2 Cache controller */
omap_smc1(0x102, 0x1);
l2x0_init(l2cache_base, aux_ctrl, L2X0_AUX_CTRL_MASK);
/*
 * Override default outer_cache.disable with a OMAP4
 * specific one
 */
outer_cache.disable = omap4_l2x0_disable;
outer_cache.set_debug = omap4_l2x0_set_debug;
return 0;
}
early_initcall(omap_l2_cache_init);
#endif
/* Accessor for the static SAR RAM mapping set up at early init. */
void __iomem *omap4_get_sar_ram_base(void)
{
return sar_ram_base;
}
/*
* SAR RAM used to save and restore the HW
* context in low power modes
*/
/*
 * Map the 16K SAR RAM region used to save/restore HW context in low
 * power modes.
 *
 * Fix: running on a non-OMAP4 part in a multi-OMAP build is "no such
 * device", not an allocation failure -- return -ENODEV there, matching
 * omap_l2_cache_init() above; -ENOMEM is kept for the ioremap failure.
 */
static int __init omap4_sar_ram_init(void)
{
	/*
	 * To avoid code running on other OMAPs in
	 * multi-omap builds
	 */
	if (!cpu_is_omap44xx())
		return -ENODEV;
	/* Static mapping, never released */
	sar_ram_base = ioremap(OMAP44XX_SAR_RAM_BASE, SZ_16K);
	if (WARN_ON(!sar_ram_base))
		return -ENOMEM;
	return 0;
}
early_initcall(omap4_sar_ram_init);
| gpl-2.0 |
ion-storm/Unleashed-N4 | drivers/media/video/cx18/cx18-vbi.c | 8281 | 8190 | /*
* cx18 Vertical Blank Interval support functions
*
* Derived from ivtv-vbi.c
*
* Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
* 02111-1307 USA
*/
#include "cx18-driver.h"
#include "cx18-vbi.h"
#include "cx18-ioctl.h"
#include "cx18-queue.h"
/*
* Raster Reference/Protection (RP) bytes, used in Start/End Active
* Video codes emitted from the digitizer in VIP 1.x mode, that flag the start
* of VBI sample or VBI ancillary data regions in the digital raster line.
*
* Task FieldEven VerticalBlank HorizontalBlank 0 0 0 0
*/
static const u8 raw_vbi_sav_rp[2] = { 0x20, 0x60 }; /* __V_, _FV_ */
static const u8 sliced_vbi_eav_rp[2] = { 0xb0, 0xf0 }; /* T_VH, TFVH */
/*
 * Pack the decoded sliced VBI lines into a private-stream-1 MPEG PES
 * packet ("itv0"/"ITV0" payload) in the per-frame sliced_mpeg_data
 * buffer, patching the PES length and 33-bit PTS fields in the copied
 * template header.  `lines` entries of cx->vbi.sliced_data are consumed.
 */
static void copy_vbi_data(struct cx18 *cx, int lines, u32 pts_stamp)
{
int line = 0;
int i;
u32 linemask[2] = { 0, 0 };
unsigned short size;
static const u8 mpeg_hdr_data[] = {
/* MPEG-2 Program Pack */
0x00, 0x00, 0x01, 0xba, /* Prog Pack start code */
0x44, 0x00, 0x0c, 0x66, 0x24, 0x01, /* SCR, SCR Ext, markers */
0x01, 0xd1, 0xd3, /* Mux Rate, markers */
0xfa, 0xff, 0xff, /* Res, Suff cnt, Stuff */
/* MPEG-2 Private Stream 1 PES Packet */
0x00, 0x00, 0x01, 0xbd, /* Priv Stream 1 start */
0x00, 0x1a, /* length */
0x84, 0x80, 0x07, /* flags, hdr data len */
0x21, 0x00, 0x5d, 0x63, 0xa7, /* PTS, markers */
0xff, 0xff /* stuffing */
};
const int sd = sizeof(mpeg_hdr_data); /* start of vbi data */
int idx = cx->vbi.frame % CX18_VBI_FRAMES;
u8 *dst = &cx->vbi.sliced_mpeg_data[idx][0];
for (i = 0; i < lines; i++) {
struct v4l2_sliced_vbi_data *sdata = cx->vbi.sliced_data + i;
int f, l;
if (sdata->id == 0)
continue;
/* map (field, line) into a 0..35 slot for the 64-bit linemask */
l = sdata->line - 6;
f = sdata->field;
if (f)
l += 18;
if (l < 32)
linemask[0] |= (1 << l);
else
linemask[1] |= (1 << (l - 32));
/* 43 bytes per line: 1 service-type byte + 42 payload bytes */
dst[sd + 12 + line * 43] = cx18_service2vbi(sdata->id);
memcpy(dst + sd + 12 + line * 43 + 1, sdata->data, 42);
line++;
}
memcpy(dst, mpeg_hdr_data, sizeof(mpeg_hdr_data));
if (line == 36) {
/* All lines are used, so there is no space for the linemask
(the max size of the VBI data is 36 * 43 + 4 bytes).
So in this case we use the magic number 'ITV0'. */
memcpy(dst + sd, "ITV0", 4);
memcpy(dst + sd + 4, dst + sd + 12, line * 43);
size = 4 + ((43 * line + 3) & ~3);
} else {
memcpy(dst + sd, "itv0", 4);
cpu_to_le32s(&linemask[0]);
cpu_to_le32s(&linemask[1]);
memcpy(dst + sd + 4, &linemask[0], 8);
size = 12 + ((43 * line + 3) & ~3);
}
/* patch PES packet length and the 5-byte PTS field in the header */
dst[4+16] = (size + 10) >> 8;
dst[5+16] = (size + 10) & 0xff;
dst[9+16] = 0x21 | ((pts_stamp >> 29) & 0x6);
dst[10+16] = (pts_stamp >> 22) & 0xff;
dst[11+16] = 1 | ((pts_stamp >> 14) & 0xff);
dst[12+16] = (pts_stamp >> 7) & 0xff;
dst[13+16] = 1 | ((pts_stamp & 0x7f) << 1);
cx->vbi.sliced_mpeg_size[idx] = sd + size;
}
/* Compress raw VBI format, removes leading SAV codes and surplus space
after the frame. Returns new compressed size. */
/* FIXME - this function ignores the input size. */
/*
 * Strip the 4-byte SAV code from the front of each raw VBI line and
 * compact the lines in place; stops at the first line without a valid
 * SAV code.  Returns the (fixed) compacted size; see FIXME above --
 * the `size` argument is ignored.
 */
static u32 compress_raw_buf(struct cx18 *cx, u8 *buf, u32 size, u32 hdr_size)
{
u32 line_size = vbi_active_samples;
u32 lines = cx->vbi.count * 2;
u8 *q = buf;
u8 *p;
int i;
/* Skip the header */
buf += hdr_size;
for (i = 0; i < lines; i++) {
p = buf + i * line_size;
/* Look for SAV code */
if (p[0] != 0xff || p[1] || p[2] ||
(p[3] != raw_vbi_sav_rp[0] &&
p[3] != raw_vbi_sav_rp[1]))
break;
if (i == lines - 1) {
/* last line is hdr_size bytes short - extrapolate it */
memcpy(q, p + 4, line_size - 4 - hdr_size);
q += line_size - 4 - hdr_size;
p += line_size - hdr_size - 1;
/* pad with copies of the last available sample byte */
memset(q, (int) *p, hdr_size);
} else {
memcpy(q, p + 4, line_size - 4);
q += line_size - 4;
}
}
return lines * (line_size - 4);
}
/*
 * Scan a sliced VBI buffer for EAV-framed lines, hand each candidate to
 * the A/V decoder subdev for slicing, and collect the decoded lines into
 * cx->vbi.sliced_data.  Returns the number of lines decoded.
 */
static u32 compress_sliced_buf(struct cx18 *cx, u8 *buf, u32 size,
const u32 hdr_size)
{
struct v4l2_decode_vbi_line vbi;
int i;
u32 line = 0;
u32 line_size = cx->is_60hz ? vbi_hblank_samples_60Hz
: vbi_hblank_samples_50Hz;
/* find the first valid line */
for (i = hdr_size, buf += hdr_size; i < size; i++, buf++) {
if (buf[0] == 0xff && !buf[1] && !buf[2] &&
(buf[3] == sliced_vbi_eav_rp[0] ||
buf[3] == sliced_vbi_eav_rp[1]))
break;
}
/*
 * The last line is short by hdr_size bytes, but for the remaining
 * checks against size, we pretend that it is not, by counting the
 * header bytes we knowingly skipped
 */
size -= (i - hdr_size);
if (size < line_size)
return line;
for (i = 0; i < size / line_size; i++) {
u8 *p = buf + i * line_size;
/* Look for EAV code */
if (p[0] != 0xff || p[1] || p[2] ||
(p[3] != sliced_vbi_eav_rp[0] &&
p[3] != sliced_vbi_eav_rp[1]))
continue;
vbi.p = p + 4;
/* the subdev fills in vbi.type/line/is_second_field on success */
v4l2_subdev_call(cx->sd_av, vbi, decode_vbi_line, &vbi);
if (vbi.type) {
cx->vbi.sliced_data[line].id = vbi.type;
cx->vbi.sliced_data[line].field = vbi.is_second_field;
cx->vbi.sliced_data[line].line = vbi.line;
memcpy(cx->vbi.sliced_data[line].data, vbi.p, 42);
line++;
}
}
return line;
}
/*
 * Process one VBI frame buffer: byte-swap it into raster order, then
 * either compact raw VBI lines in place or decode sliced VBI lines and
 * replace the buffer contents with the decoded records (optionally also
 * building the MPEG-embedded copy via copy_vbi_data()).
 */
static void _cx18_process_vbi_data(struct cx18 *cx, struct cx18_buffer *buf)
{
/*
 * The CX23418 provides a 12 byte header in its raw VBI buffers to us:
 * 0x3fffffff [4 bytes of something] [4 byte presentation time stamp]
 */
struct vbi_data_hdr {
__be32 magic;
__be32 unknown;
__be32 pts;
} *hdr = (struct vbi_data_hdr *) buf->buf;
u8 *p = (u8 *) buf->buf;
u32 size = buf->bytesused;
u32 pts;
int lines;
/*
 * The CX23418 sends us data that is 32 bit little-endian swapped,
 * but we want the raw VBI bytes in the order they were in the raster
 * line. This has a side effect of making the header big endian
 */
cx18_buf_swap(buf);
/* Raw VBI data */
if (cx18_raw_vbi(cx)) {
size = buf->bytesused =
compress_raw_buf(cx, p, size, sizeof(struct vbi_data_hdr));
/*
 * Hack needed for compatibility with old VBI software.
 * Write the frame # at the last 4 bytes of the frame
 */
p += size - 4;
memcpy(p, &cx->vbi.frame, 4);
cx->vbi.frame++;
return;
}
/* Sliced VBI data with data insertion */
/* only trust the PTS field if the header magic checks out */
pts = (be32_to_cpu(hdr->magic) == 0x3fffffff) ? be32_to_cpu(hdr->pts)
: 0;
lines = compress_sliced_buf(cx, p, size, sizeof(struct vbi_data_hdr));
/* always return at least one empty line */
if (lines == 0) {
cx->vbi.sliced_data[0].id = 0;
cx->vbi.sliced_data[0].line = 0;
cx->vbi.sliced_data[0].field = 0;
lines = 1;
}
buf->bytesused = size = lines * sizeof(cx->vbi.sliced_data[0]);
memcpy(p, &cx->vbi.sliced_data[0], size);
if (cx->vbi.insert_mpeg)
copy_vbi_data(cx, lines, pts);
cx->vbi.frame++;
}
/*
 * Process every buffer of a VBI stream MDL, adjusting the MDL's total
 * byte count as individual buffers shrink; no-op for non-VBI streams.
 */
void cx18_process_vbi_data(struct cx18 *cx, struct cx18_mdl *mdl,
int streamtype)
{
struct cx18_buffer *buf;
u32 orig_used;
if (streamtype != CX18_ENC_STREAM_TYPE_VBI)
return;
/*
 * Big assumption here:
 * Every buffer hooked to the MDL's buf_list is a complete VBI frame
 * that ends at the end of the buffer.
 *
 * To assume anything else would make the code in this file
 * more complex, or require extra memcpy()'s to make the
 * buffers satisfy the above assumption. It's just simpler to set
 * up the encoder buffer transfers to make the assumption true.
 */
list_for_each_entry(buf, &mdl->buf_list, list) {
orig_used = buf->bytesused;
if (orig_used == 0)
break;
_cx18_process_vbi_data(cx, buf);
/* processing may shrink the buffer; keep the MDL total in sync */
mdl->bytesused -= (orig_used - buf->bytesused);
}
}
| gpl-2.0 |
invisiblek/android_kernel_samsung_msm8960 | arch/mips/alchemy/common/vss.c | 9049 | 2204 | /*
* Au1300 media block power gating (VSS)
*
* This is a stop-gap solution until I have the clock framework integration
* ready. This stuff here really must be handled transparently when clocks
* for various media blocks are enabled/disabled.
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/mach-au1x00/au1000.h>
#define VSS_GATE 0x00 /* gate wait timers */
#define VSS_CLKRST 0x04 /* clock/block control */
#define VSS_FTR 0x08 /* footers */
#define VSS_ADDR(blk) (KSEG1ADDR(AU1300_VSS_PHYS_ADDR) + (blk * 0x0c))
static DEFINE_SPINLOCK(au1300_vss_lock);
/* enable a block as outlined in the databook */
/*
 * Power up one media block following the databook sequence: enable the
 * clock under reset, set the gate timer, walk the footer bits on one at
 * a time, start the FSM, release reset, then enable the isolation cells.
 * The wmb() between each step keeps the writes in databook order.
 */
static inline void __enable_block(int block)
{
void __iomem *base = (void __iomem *)VSS_ADDR(block);
__raw_writel(3, base + VSS_CLKRST); /* enable clock, assert reset */
wmb();
__raw_writel(0x01fffffe, base + VSS_GATE); /* maximum setup time */
wmb();
/* enable footers in sequence */
__raw_writel(0x01, base + VSS_FTR);
wmb();
__raw_writel(0x03, base + VSS_FTR);
wmb();
__raw_writel(0x07, base + VSS_FTR);
wmb();
__raw_writel(0x0f, base + VSS_FTR);
wmb();
__raw_writel(0x01ffffff, base + VSS_GATE); /* start FSM too */
wmb();
__raw_writel(2, base + VSS_CLKRST); /* deassert reset */
wmb();
__raw_writel(0x1f, base + VSS_FTR); /* enable isolation cells */
wmb();
}
/* disable a block as outlined in the databook */
/*
 * Power down one media block, reversing __enable_block(): isolation
 * cells off, FSM off, reset asserted, clock gated, all footers off.
 */
static inline void __disable_block(int block)
{
void __iomem *base = (void __iomem *)VSS_ADDR(block);
__raw_writel(0x0f, base + VSS_FTR); /* disable isolation cells */
wmb();
__raw_writel(0, base + VSS_GATE); /* disable FSM */
wmb();
__raw_writel(3, base + VSS_CLKRST); /* assert reset */
wmb();
__raw_writel(1, base + VSS_CLKRST); /* disable clock */
wmb();
__raw_writel(0, base + VSS_FTR); /* disable all footers */
wmb();
}
/*
 * Gate or ungate power for one Au1300 media block.  Serialized under a
 * spinlock so only one block sequence runs at a time; silently ignored
 * on non-Au1300 parts.
 */
void au1300_vss_block_control(int block, int enable)
{
unsigned long flags;
if (alchemy_get_cputype() != ALCHEMY_CPU_AU1300)
return;
/* only one block at a time */
spin_lock_irqsave(&au1300_vss_lock, flags);
if (enable)
__enable_block(block);
else
__disable_block(block);
spin_unlock_irqrestore(&au1300_vss_lock, flags);
}
EXPORT_SYMBOL_GPL(au1300_vss_block_control);
| gpl-2.0 |
sktjdgns1189/android_kernel_samsung_SHV-E110S | drivers/ata/pata_piccolo.c | 9049 | 3908 | /*
* pata_piccolo.c - Toshiba Piccolo PATA/SATA controller driver.
*
* This is basically an update to ata_generic.c to add Toshiba Piccolo support
* then split out to keep ata_generic "clean".
*
* Copyright 2005 Red Hat Inc, all rights reserved.
*
* Elements from ide/pci/generic.c
* Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
* Portions (C) Copyright 2002 Red Hat Inc <alan@redhat.com>
*
* May be copied or modified under the terms of the GNU General Public License
*
* The timing data tables/programming info are courtesy of the NetBSD driver
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_piccolo"
#define DRV_VERSION "0.0.1"
/*
 * Program the PIO timing word into PCI config reg 0x50, preserving the
 * non-timing bits (mask 0xE088) per the NetBSD-derived timing tables.
 */
static void tosh_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
static const u16 pio[6] = { /* For reg 0x50 low word & E088 */
0x0566, 0x0433, 0x0311, 0x0201, 0x0200, 0x0100
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u16 conf;
pci_read_config_word(pdev, 0x50, &conf);
conf &= 0xE088;
conf |= pio[adev->pio_mode - XFER_PIO_0];
pci_write_config_word(pdev, 0x50, conf);
}
/*
 * Program DMA timings into PCI config reg 0x5C: UDMA modes set the
 * enable bit plus a per-mode nibble pattern, MWDMA modes use a small
 * lookup table.  Non-timing bits (mask 0x78FFE088) are preserved.
 */
static void tosh_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 conf;
pci_read_config_dword(pdev, 0x5C, &conf);
conf &= 0x78FFE088; /* Keep the other bits */
if (adev->dma_mode >= XFER_UDMA_0) {
int udma = adev->dma_mode - XFER_UDMA_0;
conf |= 0x80000000;
conf |= (udma + 2) << 28;
conf |= (2 - udma) * 0x111; /* spread into three nibbles */
} else {
static const u32 mwdma[4] = {
0x0655, 0x0200, 0x0200, 0x0100
};
conf |= mwdma[adev->dma_mode - XFER_MW_DMA_0];
}
pci_write_config_dword(pdev, 0x5C, conf);
}
/* Stock BMDMA SCSI host template; nothing chip-specific needed. */
static struct scsi_host_template tosh_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
/* BMDMA port ops with Piccolo-specific PIO/DMA timing hooks. */
static struct ata_port_operations tosh_port_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = ata_cable_unknown,
.set_piomode = tosh_set_piomode,
.set_dmamode = tosh_set_dmamode
};
/**
* ata_tosh_init - attach generic IDE
* @dev: PCI device found
* @id: match entry
*
* Called each time a matching IDE interface is found. We check if the
* interface is one we wish to claim and if so we perform any chip
* specific hacks then let the ATA layer do the heavy lifting.
*/
static int ata_tosh_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO5,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
.port_ops = &tosh_port_ops
};
/* second entry dummy: only the primary channel is exposed */
const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
/* Just one port for the moment */
return ata_pci_bmdma_init_one(dev, ppi, &tosh_sht, NULL, 0);
}
/* PCI IDs of the supported Toshiba Piccolo variants. */
static struct pci_device_id ata_tosh[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), },
{ 0, },
};
/* PCI driver glue; suspend/resume only when power management is built in. */
static struct pci_driver ata_tosh_pci_driver = {
.name = DRV_NAME,
.id_table = ata_tosh,
.probe = ata_tosh_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
/* Module init: register the PCI driver. */
static int __init ata_tosh_init(void)
{
return pci_register_driver(&ata_tosh_pci_driver);
}
/* Module exit: unregister the PCI driver. */
static void __exit ata_tosh_exit(void)
{
pci_unregister_driver(&ata_tosh_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("Low level driver for Toshiba Piccolo ATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ata_tosh);
MODULE_VERSION(DRV_VERSION);
module_init(ata_tosh_init);
module_exit(ata_tosh_exit);
| gpl-2.0 |
jjhmod/mk908-jjh-kernel | security/selinux/ss/sidtab.c | 12633 | 6011 | /*
* Implementation of the SID table type.
*
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"
#define SIDTAB_HASH(sid) \
(sid & SIDTAB_HASH_MASK)
/*
 * Initialize a SID table: allocate the hash bucket array (GFP_ATOMIC --
 * presumably callable from atomic context) and reset all state.
 * next_sid starts at 1, so SID 0 is never handed out.
 * Returns 0 on success or -ENOMEM.
 */
int sidtab_init(struct sidtab *s)
{
int i;
s->htable = kmalloc(sizeof(*(s->htable)) * SIDTAB_SIZE, GFP_ATOMIC);
if (!s->htable)
return -ENOMEM;
for (i = 0; i < SIDTAB_SIZE; i++)
s->htable[i] = NULL;
s->nel = 0;
s->next_sid = 1;
s->shutdown = 0;
spin_lock_init(&s->lock);
return 0;
}
/*
 * Insert a (sid, context) pair, keeping each hash chain sorted by SID.
 * The context is deep-copied into the new node.  The wmb() before each
 * pointer publication orders node initialization ahead of making the
 * node visible -- NOTE(review): this looks intended for lock-free
 * readers walking the chains; preserve the ordering.
 * Returns 0, -EEXIST if the SID is already present, or -ENOMEM.
 */
int sidtab_insert(struct sidtab *s, u32 sid, struct context *context)
{
int hvalue, rc = 0;
struct sidtab_node *prev, *cur, *newnode;
if (!s) {
rc = -ENOMEM;
goto out;
}
hvalue = SIDTAB_HASH(sid);
prev = NULL;
cur = s->htable[hvalue];
while (cur && sid > cur->sid) {
prev = cur;
cur = cur->next;
}
if (cur && sid == cur->sid) {
rc = -EEXIST;
goto out;
}
newnode = kmalloc(sizeof(*newnode), GFP_ATOMIC);
if (newnode == NULL) {
rc = -ENOMEM;
goto out;
}
newnode->sid = sid;
if (context_cpy(&newnode->context, context)) {
kfree(newnode);
rc = -ENOMEM;
goto out;
}
if (prev) {
newnode->next = prev->next;
wmb();
prev->next = newnode;
} else {
newnode->next = s->htable[hvalue];
wmb();
s->htable[hvalue] = newnode;
}
s->nel++;
if (sid >= s->next_sid)
s->next_sid = sid + 1;
out:
return rc;
}
/*
 * Look up the context for a SID.  A node whose context.len is nonzero
 * holds only an unmapped context string (see sidtab_context_to_sid);
 * such entries count as valid only when `force` is set.  Invalid or
 * missing SIDs are remapped to SECINITSID_UNLABELED.
 * Returns the context, or NULL if even the unlabeled SID is absent.
 */
static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
{
int hvalue;
struct sidtab_node *cur;
if (!s)
return NULL;
hvalue = SIDTAB_HASH(sid);
cur = s->htable[hvalue];
while (cur && sid > cur->sid)
cur = cur->next;
if (force && cur && sid == cur->sid && cur->context.len)
return &cur->context;
if (cur == NULL || sid != cur->sid || cur->context.len) {
/* Remap invalid SIDs to the unlabeled SID. */
sid = SECINITSID_UNLABELED;
hvalue = SIDTAB_HASH(sid);
cur = s->htable[hvalue];
while (cur && sid > cur->sid)
cur = cur->next;
if (!cur || sid != cur->sid)
return NULL;
}
return &cur->context;
}
/* Look up a SID; unmapped-context entries are treated as invalid. */
struct context *sidtab_search(struct sidtab *s, u32 sid)
{
return sidtab_search_core(s, sid, 0);
}
/* Look up a SID, also returning entries that only carry an unmapped string. */
struct context *sidtab_search_force(struct sidtab *s, u32 sid)
{
return sidtab_search_core(s, sid, 1);
}
/*
 * Apply `apply` to every (sid, context) pair in the table, stopping at
 * the first nonzero return, which is propagated to the caller.
 */
int sidtab_map(struct sidtab *s,
int (*apply) (u32 sid,
struct context *context,
void *args),
void *args)
{
int i, rc = 0;
struct sidtab_node *cur;
if (!s)
goto out;
for (i = 0; i < SIDTAB_SIZE; i++) {
cur = s->htable[i];
while (cur) {
rc = apply(cur->sid, &cur->context, args);
if (rc)
goto out;
cur = cur->next;
}
}
out:
return rc;
}
/*
 * Promote node `n` to the front of the MRU lookup cache, shifting the
 * entries in slots [0, loc) down by one.  `loc` is the slot the node
 * currently occupies (or the last slot, for a fresh insertion).
 */
static void sidtab_update_cache(struct sidtab *s, struct sidtab_node *n, int loc)
{
	int i;

	BUG_ON(loc >= SIDTAB_CACHE_LEN);
	for (i = loc; i > 0; i--)
		s->cache[i] = s->cache[i - 1];
	s->cache[0] = n;
}
/*
 * Reverse lookup: scan the whole table for a node whose context equals
 * `context`, refreshing the MRU cache on a hit.  Returns the SID or 0
 * (SID 0 is never allocated, so it doubles as "not found").
 */
static inline u32 sidtab_search_context(struct sidtab *s,
struct context *context)
{
int i;
struct sidtab_node *cur;
for (i = 0; i < SIDTAB_SIZE; i++) {
cur = s->htable[i];
while (cur) {
if (context_cmp(&cur->context, context)) {
sidtab_update_cache(s, cur, SIDTAB_CACHE_LEN - 1);
return cur->sid;
}
cur = cur->next;
}
}
return 0;
}
/*
 * Fast-path reverse lookup through the small MRU cache; a hit promotes
 * the entry to the front.  The cache is packed from slot 0, so the
 * first NULL slot ends the scan.  Returns the SID or 0 on a miss.
 */
static inline u32 sidtab_search_cache(struct sidtab *s, struct context *context)
{
int i;
struct sidtab_node *node;
for (i = 0; i < SIDTAB_CACHE_LEN; i++) {
node = s->cache[i];
if (unlikely(!node))
return 0;
if (context_cmp(&node->context, context)) {
sidtab_update_cache(s, node, i);
return node->sid;
}
}
return 0;
}
/*
 * Map a context to a SID, allocating a new SID if the context is not yet
 * in the table.  The lockless cache/table scan is retried under the
 * spinlock before allocating, to close the race with concurrent inserts.
 * On insert failure next_sid is rolled back.  Returns 0 with *out_sid
 * set, or a negative error (-ENOMEM also covers SID-space exhaustion
 * and a table marked shutdown).
 */
int sidtab_context_to_sid(struct sidtab *s,
struct context *context,
u32 *out_sid)
{
u32 sid;
int ret = 0;
unsigned long flags;
*out_sid = SECSID_NULL;
sid = sidtab_search_cache(s, context);
if (!sid)
sid = sidtab_search_context(s, context);
if (!sid) {
spin_lock_irqsave(&s->lock, flags);
/* Rescan now that we hold the lock. */
sid = sidtab_search_context(s, context);
if (sid)
goto unlock_out;
/* No SID exists for the context. Allocate a new one. */
if (s->next_sid == UINT_MAX || s->shutdown) {
ret = -ENOMEM;
goto unlock_out;
}
sid = s->next_sid++;
if (context->len)
printk(KERN_INFO
"SELinux: Context %s is not valid (left unmapped).\n",
context->str);
ret = sidtab_insert(s, sid, context);
if (ret)
s->next_sid--;
unlock_out:
spin_unlock_irqrestore(&s->lock, flags);
}
if (ret)
return ret;
*out_sid = sid;
return 0;
}
/*
 * Debug helper: print hash-table occupancy statistics (entries, used
 * buckets, longest chain) tagged with `tag`.
 */
void sidtab_hash_eval(struct sidtab *h, char *tag)
{
int i, chain_len, slots_used, max_chain_len;
struct sidtab_node *cur;
slots_used = 0;
max_chain_len = 0;
for (i = 0; i < SIDTAB_SIZE; i++) {
cur = h->htable[i];
if (cur) {
slots_used++;
chain_len = 0;
while (cur) {
chain_len++;
cur = cur->next;
}
if (chain_len > max_chain_len)
max_chain_len = chain_len;
}
}
printk(KERN_DEBUG "%s: %d entries and %d/%d buckets used, longest "
"chain length %d\n", tag, h->nel, slots_used, SIDTAB_SIZE,
max_chain_len);
}
/*
 * Free every node (destroying its embedded context) and the bucket
 * array, then reset the table to its pre-init state.
 */
void sidtab_destroy(struct sidtab *s)
{
int i;
struct sidtab_node *cur, *temp;
if (!s)
return;
for (i = 0; i < SIDTAB_SIZE; i++) {
cur = s->htable[i];
while (cur) {
temp = cur;
cur = cur->next;
context_destroy(&temp->context);
kfree(temp);
}
s->htable[i] = NULL;
}
kfree(s->htable);
s->htable = NULL;
s->nel = 0;
s->next_sid = 1;
}
/*
 * Shallow-copy src's table state into dst under src's lock (dst now
 * shares src's bucket array) with a cleared shutdown flag and an empty
 * MRU cache.
 */
void sidtab_set(struct sidtab *dst, struct sidtab *src)
{
unsigned long flags;
int i;
spin_lock_irqsave(&src->lock, flags);
dst->htable = src->htable;
dst->nel = src->nel;
dst->next_sid = src->next_sid;
dst->shutdown = 0;
for (i = 0; i < SIDTAB_CACHE_LEN; i++)
dst->cache[i] = NULL;
spin_unlock_irqrestore(&src->lock, flags);
}
/*
 * Mark the table as shutting down so sidtab_context_to_sid() will
 * refuse to hand out any further SIDs.
 */
void sidtab_shutdown(struct sidtab *s)
{
	unsigned long lock_flags;

	spin_lock_irqsave(&s->lock, lock_flags);
	s->shutdown = 1;
	spin_unlock_irqrestore(&s->lock, lock_flags);
}
| gpl-2.0 |
diegocortassa/android-kernel-mediacom-mp810c | fs/ufs/cylinder.c | 14681 | 5848 | /*
* linux/fs/ufs/cylinder.c
*
* Copyright (C) 1998
* Daniel Pirkl <daniel.pirkl@email.cz>
* Charles University, Faculty of Mathematics and Physics
*
* ext2 - inode (block) bitmap caching inspired
*/
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>
#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"
/*
* Read cylinder group into cache. The memory space for ufs_cg_private_info
* structure is already allocated during ufs_read_super.
*/
/*
 * Read a cylinder group into cache slot bitmap_nr.  The
 * ufs_cg_private_info structure was already allocated in
 * ufs_read_super; here we read the group's remaining fragments and
 * cache the on-disk summary fields in CPU byte order.  On I/O failure
 * the slot is marked empty and the buffers read so far are released.
 */
static void ufs_read_cylinder (struct super_block * sb,
	unsigned cgno, unsigned bitmap_nr)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned i, j;

	UFSD("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr);
	uspi = sbi->s_uspi;
	ucpi = sbi->s_ucpi[bitmap_nr];
	ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data;

	UCPI_UBH(ucpi)->fragment = ufs_cgcmin(cgno);
	UCPI_UBH(ucpi)->count = uspi->s_cgsize >> sb->s_blocksize_bits;
	/*
	 * We have already the first fragment of cylinder group block in buffer
	 */
	UCPI_UBH(ucpi)->bh[0] = sbi->s_ucg[cgno];
	for (i = 1; i < UCPI_UBH(ucpi)->count; i++)
		if (!(UCPI_UBH(ucpi)->bh[i] = sb_bread(sb, UCPI_UBH(ucpi)->fragment + i)))
			goto failed;
	sbi->s_cgno[bitmap_nr] = cgno;

	/* Cache the on-disk summary fields in CPU byte order. */
	ucpi->c_cgx	= fs32_to_cpu(sb, ucg->cg_cgx);
	ucpi->c_ncyl	= fs16_to_cpu(sb, ucg->cg_ncyl);
	ucpi->c_niblk	= fs16_to_cpu(sb, ucg->cg_niblk);
	ucpi->c_ndblk	= fs32_to_cpu(sb, ucg->cg_ndblk);
	ucpi->c_rotor	= fs32_to_cpu(sb, ucg->cg_rotor);
	ucpi->c_frotor	= fs32_to_cpu(sb, ucg->cg_frotor);
	ucpi->c_irotor	= fs32_to_cpu(sb, ucg->cg_irotor);
	ucpi->c_btotoff	= fs32_to_cpu(sb, ucg->cg_btotoff);
	ucpi->c_boff	= fs32_to_cpu(sb, ucg->cg_boff);
	ucpi->c_iusedoff = fs32_to_cpu(sb, ucg->cg_iusedoff);
	ucpi->c_freeoff	= fs32_to_cpu(sb, ucg->cg_freeoff);
	ucpi->c_nextfreeoff = fs32_to_cpu(sb, ucg->cg_nextfreeoff);
	ucpi->c_clustersumoff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clustersumoff);
	ucpi->c_clusteroff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clusteroff);
	ucpi->c_nclusterblks = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_nclusterblks);
	UFSD("EXIT\n");
	return;

failed:
	/*
	 * Release the buffers this function just read.  bh[0] aliases
	 * sbi->s_ucg[cgno] and must not be dropped here.  (Bug fix: the
	 * old code released sbi->s_ucg[j] -- unrelated superblock group
	 * buffers indexed by group number -- which leaked the just-read
	 * buffer heads and corrupted the refcounts of the s_ucg cache.)
	 */
	for (j = 1; j < i; j++)
		brelse (UCPI_UBH(ucpi)->bh[j]);
	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
	ufs_error (sb, "ufs_read_cylinder", "can't read cylinder group block %u", cgno);
}
/*
* Remove cylinder group from cache, doesn't release memory
* allocated for cylinder group (this is done at ufs_put_super only).
*/
/*
 * Write back the rotor fields of the cached cylinder group in slot
 * bitmap_nr, release its extra buffers, and mark the slot empty.  The
 * ucpi memory itself is kept for reuse (freed only at ufs_put_super).
 */
void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned i;

	UFSD("ENTER, bitmap_nr %u\n", bitmap_nr);

	uspi = sbi->s_uspi;
	/* Empty slot: nothing cached, nothing to write back. */
	if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) {
		UFSD("EXIT\n");
		return;
	}
	ucpi = sbi->s_ucpi[bitmap_nr];
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	/* With the LRU cache in use, slots >= s_cg_loaded are invalid. */
	if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) {
		ufs_panic (sb, "ufs_put_cylinder", "internal error");
		return;
	}
	/*
	 * rotor is not so important data, so we put it to disk
	 * at the end of working with cylinder
	 */
	ucg->cg_rotor = cpu_to_fs32(sb, ucpi->c_rotor);
	ucg->cg_frotor = cpu_to_fs32(sb, ucpi->c_frotor);
	ucg->cg_irotor = cpu_to_fs32(sb, ucpi->c_irotor);
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	/* bh[0] aliases sbi->s_ucg[]; release only bh[1..count-1]. */
	for (i = 1; i < UCPI_UBH(ucpi)->count; i++) {
		brelse (UCPI_UBH(ucpi)->bh[i]);
	}

	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
	UFSD("EXIT\n");
}
/*
* Find cylinder group in cache and return it as pointer.
* If cylinder group is not in cache, we will load it from disk.
*
* The cache is managed by LRU algorithm.
*/
/*
 * Return the cached cylinder group cgno, loading it from disk on a
 * cache miss.  With few groups (<= UFS_MAX_GROUP_LOADED) the cache is
 * direct-mapped by group number; otherwise slots form an LRU list with
 * the most recently used group at index 0.  Returns NULL on internal
 * inconsistency (after ufs_panic).
 */
struct ufs_cg_private_info * ufs_load_cylinder (
	struct super_block * sb, unsigned cgno)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	unsigned cg, i, j;

	UFSD("ENTER, cgno %u\n", cgno);

	uspi = sbi->s_uspi;
	/* Group number out of range: internal error. */
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_load_cylinder", "internal error, high number of cg");
		return NULL;
	}
	/*
	 * Cylinder group number cg it in cache and it was last used
	 */
	if (sbi->s_cgno[0] == cgno) {
		UFSD("EXIT\n");
		return sbi->s_ucpi[0];
	}
	/*
	 * Number of cylinder groups is not higher than UFS_MAX_GROUP_LOADED
	 */
	if (uspi->s_ncg <= UFS_MAX_GROUP_LOADED) {
		/* Small fs: slot index equals group number, no eviction. */
		if (sbi->s_cgno[cgno] != UFS_CGNO_EMPTY) {
			if (sbi->s_cgno[cgno] != cgno) {
				ufs_panic (sb, "ufs_load_cylinder", "internal error, wrong number of cg in cache");
				UFSD("EXIT (FAILED)\n");
				return NULL;
			}
			else {
				UFSD("EXIT\n");
				return sbi->s_ucpi[cgno];
			}
		} else {
			ufs_read_cylinder (sb, cgno, cgno);
			UFSD("EXIT\n");
			return sbi->s_ucpi[cgno];
		}
	}
	/*
	 * Cylinder group number cg is in cache but it was not last used,
	 * we will move to the first position
	 */
	for (i = 0; i < sbi->s_cg_loaded && sbi->s_cgno[i] != cgno; i++);
	if (i < sbi->s_cg_loaded && sbi->s_cgno[i] == cgno) {
		cg = sbi->s_cgno[i];
		ucpi = sbi->s_ucpi[i];
		/* Shift slots 0..i-1 down; the hit moves to the front. */
		for (j = i; j > 0; j--) {
			sbi->s_cgno[j] = sbi->s_cgno[j-1];
			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
		}
		sbi->s_cgno[0] = cg;
		sbi->s_ucpi[0] = ucpi;
	/*
	 * Cylinder group number cg is not in cache, we will read it from disk
	 * and put it to the first position
	 */
	} else {
		if (sbi->s_cg_loaded < UFS_MAX_GROUP_LOADED)
			sbi->s_cg_loaded++;
		else
			/* Cache full: write back and evict the LRU entry. */
			ufs_put_cylinder (sb, UFS_MAX_GROUP_LOADED-1);
		/* Reuse the last slot's ucpi buffer for the new group. */
		ucpi = sbi->s_ucpi[sbi->s_cg_loaded - 1];
		for (j = sbi->s_cg_loaded - 1; j > 0; j--) {
			sbi->s_cgno[j] = sbi->s_cgno[j-1];
			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
		}
		sbi->s_ucpi[0] = ucpi;
		ufs_read_cylinder (sb, cgno, 0);
	}
	UFSD("EXIT\n");
	return sbi->s_ucpi[0];
}
| gpl-2.0 |
jeremymcrhat/Nexus_5X_kernel | fs/ufs/cylinder.c | 14681 | 5848 | /*
* linux/fs/ufs/cylinder.c
*
* Copyright (C) 1998
* Daniel Pirkl <daniel.pirkl@email.cz>
* Charles University, Faculty of Mathematics and Physics
*
* ext2 - inode (block) bitmap caching inspired
*/
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>
#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"
/*
* Read cylinder group into cache. The memory space for ufs_cg_private_info
* structure is already allocated during ufs_read_super.
*/
/*
 * Read a cylinder group into cache slot bitmap_nr.  The
 * ufs_cg_private_info structure was already allocated in
 * ufs_read_super; here we read the group's remaining fragments and
 * cache the on-disk summary fields in CPU byte order.  On I/O failure
 * the slot is marked empty and the buffers read so far are released.
 */
static void ufs_read_cylinder (struct super_block * sb,
	unsigned cgno, unsigned bitmap_nr)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned i, j;

	UFSD("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr);
	uspi = sbi->s_uspi;
	ucpi = sbi->s_ucpi[bitmap_nr];
	ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data;

	UCPI_UBH(ucpi)->fragment = ufs_cgcmin(cgno);
	UCPI_UBH(ucpi)->count = uspi->s_cgsize >> sb->s_blocksize_bits;
	/*
	 * We have already the first fragment of cylinder group block in buffer
	 */
	UCPI_UBH(ucpi)->bh[0] = sbi->s_ucg[cgno];
	for (i = 1; i < UCPI_UBH(ucpi)->count; i++)
		if (!(UCPI_UBH(ucpi)->bh[i] = sb_bread(sb, UCPI_UBH(ucpi)->fragment + i)))
			goto failed;
	sbi->s_cgno[bitmap_nr] = cgno;

	/* Cache the on-disk summary fields in CPU byte order. */
	ucpi->c_cgx	= fs32_to_cpu(sb, ucg->cg_cgx);
	ucpi->c_ncyl	= fs16_to_cpu(sb, ucg->cg_ncyl);
	ucpi->c_niblk	= fs16_to_cpu(sb, ucg->cg_niblk);
	ucpi->c_ndblk	= fs32_to_cpu(sb, ucg->cg_ndblk);
	ucpi->c_rotor	= fs32_to_cpu(sb, ucg->cg_rotor);
	ucpi->c_frotor	= fs32_to_cpu(sb, ucg->cg_frotor);
	ucpi->c_irotor	= fs32_to_cpu(sb, ucg->cg_irotor);
	ucpi->c_btotoff	= fs32_to_cpu(sb, ucg->cg_btotoff);
	ucpi->c_boff	= fs32_to_cpu(sb, ucg->cg_boff);
	ucpi->c_iusedoff = fs32_to_cpu(sb, ucg->cg_iusedoff);
	ucpi->c_freeoff	= fs32_to_cpu(sb, ucg->cg_freeoff);
	ucpi->c_nextfreeoff = fs32_to_cpu(sb, ucg->cg_nextfreeoff);
	ucpi->c_clustersumoff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clustersumoff);
	ucpi->c_clusteroff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clusteroff);
	ucpi->c_nclusterblks = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_nclusterblks);
	UFSD("EXIT\n");
	return;

failed:
	/*
	 * Release the buffers this function just read.  bh[0] aliases
	 * sbi->s_ucg[cgno] and must not be dropped here.  (Bug fix: the
	 * old code released sbi->s_ucg[j] -- unrelated superblock group
	 * buffers indexed by group number -- which leaked the just-read
	 * buffer heads and corrupted the refcounts of the s_ucg cache.)
	 */
	for (j = 1; j < i; j++)
		brelse (UCPI_UBH(ucpi)->bh[j]);
	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
	ufs_error (sb, "ufs_read_cylinder", "can't read cylinder group block %u", cgno);
}
/*
* Remove cylinder group from cache, doesn't release memory
* allocated for cylinder group (this is done at ufs_put_super only).
*/
/*
 * Write back the rotor fields of the cached cylinder group in slot
 * bitmap_nr, release its extra buffers, and mark the slot empty.  The
 * ucpi memory itself is kept for reuse (freed only at ufs_put_super).
 */
void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned i;

	UFSD("ENTER, bitmap_nr %u\n", bitmap_nr);

	uspi = sbi->s_uspi;
	/* Empty slot: nothing cached, nothing to write back. */
	if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) {
		UFSD("EXIT\n");
		return;
	}
	ucpi = sbi->s_ucpi[bitmap_nr];
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	/* With the LRU cache in use, slots >= s_cg_loaded are invalid. */
	if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) {
		ufs_panic (sb, "ufs_put_cylinder", "internal error");
		return;
	}
	/*
	 * rotor is not so important data, so we put it to disk
	 * at the end of working with cylinder
	 */
	ucg->cg_rotor = cpu_to_fs32(sb, ucpi->c_rotor);
	ucg->cg_frotor = cpu_to_fs32(sb, ucpi->c_frotor);
	ucg->cg_irotor = cpu_to_fs32(sb, ucpi->c_irotor);
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	/* bh[0] aliases sbi->s_ucg[]; release only bh[1..count-1]. */
	for (i = 1; i < UCPI_UBH(ucpi)->count; i++) {
		brelse (UCPI_UBH(ucpi)->bh[i]);
	}

	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
	UFSD("EXIT\n");
}
/*
* Find cylinder group in cache and return it as pointer.
* If cylinder group is not in cache, we will load it from disk.
*
* The cache is managed by LRU algorithm.
*/
/*
 * Return the cached cylinder group cgno, loading it from disk on a
 * cache miss.  With few groups (<= UFS_MAX_GROUP_LOADED) the cache is
 * direct-mapped by group number; otherwise slots form an LRU list with
 * the most recently used group at index 0.  Returns NULL on internal
 * inconsistency (after ufs_panic).
 */
struct ufs_cg_private_info * ufs_load_cylinder (
	struct super_block * sb, unsigned cgno)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	unsigned cg, i, j;

	UFSD("ENTER, cgno %u\n", cgno);

	uspi = sbi->s_uspi;
	/* Group number out of range: internal error. */
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_load_cylinder", "internal error, high number of cg");
		return NULL;
	}
	/*
	 * Cylinder group number cg it in cache and it was last used
	 */
	if (sbi->s_cgno[0] == cgno) {
		UFSD("EXIT\n");
		return sbi->s_ucpi[0];
	}
	/*
	 * Number of cylinder groups is not higher than UFS_MAX_GROUP_LOADED
	 */
	if (uspi->s_ncg <= UFS_MAX_GROUP_LOADED) {
		/* Small fs: slot index equals group number, no eviction. */
		if (sbi->s_cgno[cgno] != UFS_CGNO_EMPTY) {
			if (sbi->s_cgno[cgno] != cgno) {
				ufs_panic (sb, "ufs_load_cylinder", "internal error, wrong number of cg in cache");
				UFSD("EXIT (FAILED)\n");
				return NULL;
			}
			else {
				UFSD("EXIT\n");
				return sbi->s_ucpi[cgno];
			}
		} else {
			ufs_read_cylinder (sb, cgno, cgno);
			UFSD("EXIT\n");
			return sbi->s_ucpi[cgno];
		}
	}
	/*
	 * Cylinder group number cg is in cache but it was not last used,
	 * we will move to the first position
	 */
	for (i = 0; i < sbi->s_cg_loaded && sbi->s_cgno[i] != cgno; i++);
	if (i < sbi->s_cg_loaded && sbi->s_cgno[i] == cgno) {
		cg = sbi->s_cgno[i];
		ucpi = sbi->s_ucpi[i];
		/* Shift slots 0..i-1 down; the hit moves to the front. */
		for (j = i; j > 0; j--) {
			sbi->s_cgno[j] = sbi->s_cgno[j-1];
			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
		}
		sbi->s_cgno[0] = cg;
		sbi->s_ucpi[0] = ucpi;
	/*
	 * Cylinder group number cg is not in cache, we will read it from disk
	 * and put it to the first position
	 */
	} else {
		if (sbi->s_cg_loaded < UFS_MAX_GROUP_LOADED)
			sbi->s_cg_loaded++;
		else
			/* Cache full: write back and evict the LRU entry. */
			ufs_put_cylinder (sb, UFS_MAX_GROUP_LOADED-1);
		/* Reuse the last slot's ucpi buffer for the new group. */
		ucpi = sbi->s_ucpi[sbi->s_cg_loaded - 1];
		for (j = sbi->s_cg_loaded - 1; j > 0; j--) {
			sbi->s_cgno[j] = sbi->s_cgno[j-1];
			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
		}
		sbi->s_ucpi[0] = ucpi;
		ufs_read_cylinder (sb, cgno, 0);
	}
	UFSD("EXIT\n");
	return sbi->s_ucpi[0];
}
| gpl-2.0 |
jimbojr/linux | arch/s390/kernel/ipl.c | 90 | 53874 | /*
* ipl/reipl/dump support for Linux on s390.
*
* Copyright IBM Corp. 2005, 2012
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
* Heiko Carstens <heiko.carstens@de.ibm.com>
* Volker Sameske <sameske@de.ibm.com>
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/crash_dump.h>
#include <linux/debug_locks.h>
#include <asm/diag.h>
#include <asm/ipl.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/cpcmd.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
#include <asm/reset.h>
#include <asm/sclp.h>
#include <asm/checksum.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include "entry.h"
#define IPL_PARM_BLOCK_VERSION 0
#define IPL_UNKNOWN_STR "unknown"
#define IPL_CCW_STR "ccw"
#define IPL_FCP_STR "fcp"
#define IPL_FCP_DUMP_STR "fcp_dump"
#define IPL_NSS_STR "nss"
#define DUMP_CCW_STR "ccw"
#define DUMP_FCP_STR "fcp"
#define DUMP_NONE_STR "none"
/*
* Four shutdown trigger types are supported:
* - panic
* - halt
* - power off
* - reipl
* - restart
*/
#define ON_PANIC_STR "on_panic"
#define ON_HALT_STR "on_halt"
#define ON_POFF_STR "on_poff"
#define ON_REIPL_STR "on_reboot"
#define ON_RESTART_STR "on_restart"
struct shutdown_action;
struct shutdown_trigger {
char *name;
struct shutdown_action *action;
};
/*
* The following shutdown action types are supported:
*/
#define SHUTDOWN_ACTION_IPL_STR "ipl"
#define SHUTDOWN_ACTION_REIPL_STR "reipl"
#define SHUTDOWN_ACTION_DUMP_STR "dump"
#define SHUTDOWN_ACTION_VMCMD_STR "vmcmd"
#define SHUTDOWN_ACTION_STOP_STR "stop"
#define SHUTDOWN_ACTION_DUMP_REIPL_STR "dump_reipl"
struct shutdown_action {
char *name;
void (*fn) (struct shutdown_trigger *trigger);
int (*init) (void);
int init_rc;
};
/*
 * Map an IPL type to its sysfs name; any unrecognized value yields
 * "unknown".
 */
static char *ipl_type_str(enum ipl_type type)
{
	if (type == IPL_TYPE_CCW)
		return IPL_CCW_STR;
	if (type == IPL_TYPE_FCP)
		return IPL_FCP_STR;
	if (type == IPL_TYPE_FCP_DUMP)
		return IPL_FCP_DUMP_STR;
	if (type == IPL_TYPE_NSS)
		return IPL_NSS_STR;
	return IPL_UNKNOWN_STR;
}
enum dump_type {
DUMP_TYPE_NONE = 1,
DUMP_TYPE_CCW = 2,
DUMP_TYPE_FCP = 4,
};
/*
 * Map a dump type to its sysfs name; unrecognized values yield NULL.
 */
static char *dump_type_str(enum dump_type type)
{
	if (type == DUMP_TYPE_NONE)
		return DUMP_NONE_STR;
	if (type == DUMP_TYPE_CCW)
		return DUMP_CCW_STR;
	if (type == DUMP_TYPE_FCP)
		return DUMP_FCP_STR;
	return NULL;
}
/*
* Must be in data section since the bss section
* is not cleared when these are accessed.
*/
static u8 ipl_ssid __attribute__((__section__(".data"))) = 0;
static u16 ipl_devno __attribute__((__section__(".data"))) = 0;
u32 ipl_flags __attribute__((__section__(".data"))) = 0;
enum ipl_method {
REIPL_METHOD_CCW_CIO,
REIPL_METHOD_CCW_DIAG,
REIPL_METHOD_CCW_VM,
REIPL_METHOD_FCP_RO_DIAG,
REIPL_METHOD_FCP_RW_DIAG,
REIPL_METHOD_FCP_RO_VM,
REIPL_METHOD_FCP_DUMP,
REIPL_METHOD_NSS,
REIPL_METHOD_NSS_DIAG,
REIPL_METHOD_DEFAULT,
};
enum dump_method {
DUMP_METHOD_NONE,
DUMP_METHOD_CCW_CIO,
DUMP_METHOD_CCW_DIAG,
DUMP_METHOD_CCW_VM,
DUMP_METHOD_FCP_DIAG,
};
static int diag308_set_works = 0;
static struct ipl_parameter_block ipl_block;
static int reipl_capabilities = IPL_TYPE_UNKNOWN;
static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT;
static struct ipl_parameter_block *reipl_block_fcp;
static struct ipl_parameter_block *reipl_block_ccw;
static struct ipl_parameter_block *reipl_block_nss;
static struct ipl_parameter_block *reipl_block_actual;
static int dump_capabilities = DUMP_TYPE_NONE;
static enum dump_type dump_type = DUMP_TYPE_NONE;
static enum dump_method dump_method = DUMP_METHOD_NONE;
static struct ipl_parameter_block *dump_block_fcp;
static struct ipl_parameter_block *dump_block_ccw;
static struct sclp_ipl_info sclp_ipl_info;
/*
 * Issue DIAGNOSE 0x308 (IPL functions) with the given subcode and
 * parameter-block address.  The EX_TABLE entry makes an exception on
 * the diag instruction resume right after it, so the function simply
 * returns whatever is left in register 1 (the return code, initially
 * 0).
 */
static inline int __diag308(unsigned long subcode, void *addr)
{
	register unsigned long _addr asm("0") = (unsigned long) addr;
	register unsigned long _rc asm("1") = 0;

	asm volatile(
		" diag %0,%2,0x308\n"
		"0:\n"
		EX_TABLE(0b,0b)
		: "+d" (_addr), "+d" (_rc)
		: "d" (subcode) : "cc", "memory");
	return _rc;
}
/*
 * Public diag 0x308 wrapper: account the call in the per-CPU diagnose
 * statistics, then issue the instruction.
 */
int diag308(unsigned long subcode, void *addr)
{
	diag_stat_inc(DIAG_STAT_X308);
	return __diag308(subcode, addr);
}
EXPORT_SYMBOL_GPL(diag308);
/* SYSFS */
#define IPL_ATTR_SHOW_FN(_prefix, _name, _format, args...) \
static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, \
char *page) \
{ \
return snprintf(page, PAGE_SIZE, _format, ##args); \
}
#define IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk) \
static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t len) \
{ \
unsigned long long ssid, devno; \
\
if (sscanf(buf, "0.%llx.%llx\n", &ssid, &devno) != 2) \
return -EINVAL; \
\
if (ssid > __MAX_SSID || devno > __MAX_SUBCHANNEL) \
return -EINVAL; \
\
_ipl_blk.ssid = ssid; \
_ipl_blk.devno = devno; \
return len; \
}
#define DEFINE_IPL_CCW_ATTR_RW(_prefix, _name, _ipl_blk) \
IPL_ATTR_SHOW_FN(_prefix, _name, "0.%x.%04x\n", \
_ipl_blk.ssid, _ipl_blk.devno); \
IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk); \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
__ATTR(_name, (S_IRUGO | S_IWUSR), \
sys_##_prefix##_##_name##_show, \
sys_##_prefix##_##_name##_store) \
#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \
IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
__ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL)
#define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value) \
IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, (unsigned long long) _value) \
static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t len) \
{ \
unsigned long long value; \
if (sscanf(buf, _fmt_in, &value) != 1) \
return -EINVAL; \
_value = value; \
return len; \
} \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
__ATTR(_name,(S_IRUGO | S_IWUSR), \
sys_##_prefix##_##_name##_show, \
sys_##_prefix##_##_name##_store)
#define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\
IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, _value) \
static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t len) \
{ \
strncpy(_value, buf, sizeof(_value) - 1); \
strim(_value); \
return len; \
} \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
__ATTR(_name,(S_IRUGO | S_IWUSR), \
sys_##_prefix##_##_name##_show, \
sys_##_prefix##_##_name##_store)
/*
 * Force every attribute in a NULL-terminated attribute list to be
 * read-only in sysfs.
 */
static void make_attrs_ro(struct attribute **attrs)
{
	for (; *attrs; attrs++)
		(*attrs)->mode = S_IRUGO;
}
/*
* ipl section
*/
/*
 * Derive the boot IPL type from the flags and parameter block left
 * behind by the early boot code.
 */
static __init enum ipl_type get_ipl_type(void)
{
	struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;

	if (ipl_flags & IPL_NSS_VALID)
		return IPL_TYPE_NSS;
	if (!(ipl_flags & IPL_DEVNO_VALID))
		return IPL_TYPE_UNKNOWN;
	/* Device number but no parameter block: plain CCW IPL. */
	if (!(ipl_flags & IPL_PARMBLOCK_VALID))
		return IPL_TYPE_CCW;
	if (ipl->hdr.version > IPL_MAX_SUPPORTED_VERSION)
		return IPL_TYPE_UNKNOWN;
	if (ipl->hdr.pbt != DIAG308_IPL_TYPE_FCP)
		return IPL_TYPE_UNKNOWN;
	/* FCP parameter block: distinguish dump IPL from a normal IPL. */
	if (ipl->ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP)
		return IPL_TYPE_FCP_DUMP;
	return IPL_TYPE_FCP;
}
struct ipl_info ipl_info;
EXPORT_SYMBOL_GPL(ipl_info);
/* sysfs show handler for /sys/firmware/ipl/ipl_type. */
static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *page)
{
	return sprintf(page, "%s\n", ipl_type_str(ipl_info.type));
}
static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
/* VM IPL PARM routines */
/*
 * Copy the VM parameter string of an IPL parameter block into dest as
 * ASCII, NUL-terminated, writing at most size - 1 characters.  Returns
 * the number of characters copied (0 when the block carries no valid
 * VM parm).
 */
static size_t reipl_get_ascii_vmparm(char *dest, size_t size,
				     const struct ipl_parameter_block *ipb)
{
	int i;
	size_t len;
	char has_lowercase = 0;

	len = 0;
	if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
	    (ipb->ipl_info.ccw.vm_parm_len > 0)) {
		len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len);
		memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
		/* If at least one character is lowercase, we assume mixed
		 * case; otherwise we convert everything to lowercase.
		 */
		/* NOTE(review): the ranges below are EBCDIC lowercase
		 * letters; the > 0x80 comparisons assume plain char is
		 * unsigned (the case on s390) -- confirm for other ABIs. */
		for (i = 0; i < len; i++)
			if ((dest[i] > 0x80 && dest[i] < 0x8a) || /* a-i */
			    (dest[i] > 0x90 && dest[i] < 0x9a) || /* j-r */
			    (dest[i] > 0xa1 && dest[i] < 0xaa)) { /* s-z */
				has_lowercase = 1;
				break;
			}
		if (!has_lowercase)
			EBC_TOLOWER(dest, len);
		/* Convert EBCDIC to ASCII in place. */
		EBCASC(dest, len);
	}
	dest[len] = 0;
	return len;
}
/*
 * Write the boot VM parameter string (ASCII) into dest; empty string
 * unless diag 308 works and the system was IPLed from a CCW device.
 * Returns the string length.
 */
size_t append_ipl_vmparm(char *dest, size_t size)
{
	size_t rc;

	rc = 0;
	if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW))
		rc = reipl_get_ascii_vmparm(dest, size, &ipl_block);
	else
		dest[0] = 0;
	return rc;
}
/* sysfs show handler for the boot VM parameter string ("parm"). */
static ssize_t ipl_vm_parm_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *page)
{
	char parm[DIAG308_VMPARM_SIZE + 1] = {};

	append_ipl_vmparm(parm, sizeof(parm));
	return sprintf(page, "%s\n", parm);
}
/*
 * Length of the SCP data with trailing blanks and NUL bytes stripped.
 */
static size_t scpdata_length(const char *buf, size_t count)
{
	while (count > 0) {
		char c = buf[count - 1];

		if (c != '\0' && c != ' ')
			break;
		count--;
	}
	return count;
}
/*
 * Copy the SCP data of an FCP IPL parameter block into dest as a
 * NUL-terminated string of at most size - 1 characters.  Trailing
 * blanks/NULs are stripped first; data containing any non-ASCII byte
 * is dropped entirely (returns 0); all-uppercase data is lowercased.
 */
static size_t reipl_append_ascii_scpdata(char *dest, size_t size,
					 const struct ipl_parameter_block *ipb)
{
	size_t count;
	size_t i;
	int has_lowercase;

	count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data,
					     ipb->ipl_info.fcp.scp_data_len));
	if (!count)
		goto out;

	has_lowercase = 0;
	for (i = 0; i < count; i++) {
		/* Any non-ASCII byte invalidates the whole string. */
		if (!isascii(ipb->ipl_info.fcp.scp_data[i])) {
			count = 0;
			goto out;
		}
		if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i]))
			has_lowercase = 1;
	}

	if (has_lowercase)
		memcpy(dest, ipb->ipl_info.fcp.scp_data, count);
	else
		for (i = 0; i < count; i++)
			dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]);
out:
	dest[count] = '\0';
	return count;
}
/*
 * Write the boot SCP data (ASCII) into dest; empty string unless the
 * system was IPLed from an FCP device.  Returns the string length.
 */
size_t append_ipl_scpdata(char *dest, size_t len)
{
	size_t rc;

	rc = 0;
	if (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP)
		rc = reipl_append_ascii_scpdata(dest, len, &ipl_block);
	else
		dest[0] = 0;
	return rc;
}
static struct kobj_attribute sys_ipl_vm_parm_attr =
__ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL);
/*
 * sysfs show handler for the boot device: CCW bus id for a CCW IPL,
 * FCP device number for (dump) FCP IPLs, empty otherwise.
 */
static ssize_t sys_ipl_device_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *page)
{
	struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;

	switch (ipl_info.type) {
	case IPL_TYPE_CCW:
		return sprintf(page, "0.%x.%04x\n", ipl_ssid, ipl_devno);
	case IPL_TYPE_FCP:
	case IPL_TYPE_FCP_DUMP:
		return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno);
	default:
		return 0;
	}
}
static struct kobj_attribute sys_ipl_device_attr =
__ATTR(device, S_IRUGO, sys_ipl_device_show, NULL);
/* sysfs binary read of the raw boot IPL parameter block. */
static ssize_t ipl_parameter_read(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *attr, char *buf,
				  loff_t off, size_t count)
{
	return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START,
				       IPL_PARMBLOCK_SIZE);
}
static struct bin_attribute ipl_parameter_attr =
__BIN_ATTR(binary_parameter, S_IRUGO, ipl_parameter_read, NULL,
PAGE_SIZE);
/* sysfs binary read of the SCP data embedded in the boot parameter block. */
static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len;
	void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data;

	return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
static struct bin_attribute ipl_scp_data_attr =
__BIN_ATTR(scp_data, S_IRUGO, ipl_scp_data_read, NULL, PAGE_SIZE);
static struct bin_attribute *ipl_fcp_bin_attrs[] = {
&ipl_parameter_attr,
&ipl_scp_data_attr,
NULL,
};
/* FCP ipl device attributes */
DEFINE_IPL_ATTR_RO(ipl_fcp, wwpn, "0x%016llx\n", (unsigned long long)
IPL_PARMBLOCK_START->ipl_info.fcp.wwpn);
DEFINE_IPL_ATTR_RO(ipl_fcp, lun, "0x%016llx\n", (unsigned long long)
IPL_PARMBLOCK_START->ipl_info.fcp.lun);
DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n", (unsigned long long)
IPL_PARMBLOCK_START->ipl_info.fcp.bootprog);
DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n", (unsigned long long)
IPL_PARMBLOCK_START->ipl_info.fcp.br_lba);
/*
 * sysfs show handler for the boot loadparm, taken from the SCLP IPL
 * info, EBCDIC-converted and trimmed.  "#unknown#" when SCLP did not
 * provide valid IPL information.
 */
static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *page)
{
	char loadparm[LOADPARM_LEN + 1] = {};

	if (!sclp_ipl_info.is_valid)
		return sprintf(page, "#unknown#\n");
	memcpy(loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN);
	EBCASC(loadparm, LOADPARM_LEN);
	strim(loadparm);
	return sprintf(page, "%s\n", loadparm);
}
static struct kobj_attribute sys_ipl_ccw_loadparm_attr =
__ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
static struct attribute *ipl_fcp_attrs[] = {
&sys_ipl_type_attr.attr,
&sys_ipl_device_attr.attr,
&sys_ipl_fcp_wwpn_attr.attr,
&sys_ipl_fcp_lun_attr.attr,
&sys_ipl_fcp_bootprog_attr.attr,
&sys_ipl_fcp_br_lba_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
NULL,
};
static struct attribute_group ipl_fcp_attr_group = {
.attrs = ipl_fcp_attrs,
.bin_attrs = ipl_fcp_bin_attrs,
};
/* CCW ipl device attributes */
static struct attribute *ipl_ccw_attrs_vm[] = {
&sys_ipl_type_attr.attr,
&sys_ipl_device_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
&sys_ipl_vm_parm_attr.attr,
NULL,
};
static struct attribute *ipl_ccw_attrs_lpar[] = {
&sys_ipl_type_attr.attr,
&sys_ipl_device_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
NULL,
};
static struct attribute_group ipl_ccw_attr_group_vm = {
.attrs = ipl_ccw_attrs_vm,
};
static struct attribute_group ipl_ccw_attr_group_lpar = {
.attrs = ipl_ccw_attrs_lpar
};
/* NSS ipl device attributes */
DEFINE_IPL_ATTR_RO(ipl_nss, name, "%s\n", kernel_nss_name);
static struct attribute *ipl_nss_attrs[] = {
&sys_ipl_type_attr.attr,
&sys_ipl_nss_name_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
&sys_ipl_vm_parm_attr.attr,
NULL,
};
static struct attribute_group ipl_nss_attr_group = {
.attrs = ipl_nss_attrs,
};
/* UNKNOWN ipl device attributes */
static struct attribute *ipl_unknown_attrs[] = {
&sys_ipl_type_attr.attr,
NULL,
};
static struct attribute_group ipl_unknown_attr_group = {
.attrs = ipl_unknown_attrs,
};
static struct kset *ipl_kset;
/*
 * Executed on the IPL CPU: try a diag 308 IPL first; if that returns,
 * fall back to the z/VM IPL command or, for a CCW boot device, to a
 * CCW re-IPL.
 */
static void __ipl_run(void *unused)
{
	diag308(DIAG308_IPL, NULL);
	if (MACHINE_IS_VM)
		__cpcmd("IPL", NULL, 0, NULL);
	else if (ipl_info.type == IPL_TYPE_CCW)
		reipl_ccw_dev(&ipl_info.data.ccw.dev_id);
}
/* "ipl" shutdown action: run __ipl_run on the IPL CPU. */
static void ipl_run(struct shutdown_trigger *trigger)
{
	smp_call_ipl_cpu(__ipl_run, NULL);
}
/*
 * Create the /sys/firmware/ipl kset and populate it with the attribute
 * group matching the detected boot IPL type.  Any failure here is
 * fatal (panic); on success always returns 0.
 */
static int __init ipl_init(void)
{
	int rc;

	ipl_kset = kset_create_and_add("ipl", NULL, firmware_kobj);
	if (!ipl_kset) {
		rc = -ENOMEM;
		goto out;
	}
	switch (ipl_info.type) {
	case IPL_TYPE_CCW:
		/* z/VM guests additionally expose the VM parm attribute. */
		if (MACHINE_IS_VM)
			rc = sysfs_create_group(&ipl_kset->kobj,
						&ipl_ccw_attr_group_vm);
		else
			rc = sysfs_create_group(&ipl_kset->kobj,
						&ipl_ccw_attr_group_lpar);
		break;
	case IPL_TYPE_FCP:
	case IPL_TYPE_FCP_DUMP:
		rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
		break;
	case IPL_TYPE_NSS:
		rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nss_attr_group);
		break;
	default:
		rc = sysfs_create_group(&ipl_kset->kobj,
					&ipl_unknown_attr_group);
		break;
	}
out:
	if (rc)
		panic("ipl_init failed: rc = %i\n", rc);
	return 0;
}
static struct shutdown_action __refdata ipl_action = {
.name = SHUTDOWN_ACTION_IPL_STR,
.fn = ipl_run,
.init = ipl_init,
};
/*
* reipl shutdown action: Reboot Linux on shutdown.
*/
/* VM IPL PARM attributes */
/* Format the VM parameter string of ipb as an ASCII sysfs line. */
static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb,
					 char *page)
{
	char vmparm[DIAG308_VMPARM_SIZE + 1] = {};

	reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
	return sprintf(page, "%s\n", vmparm);
}
/*
 * Store a new VM parameter string into an IPL parameter block: strip
 * one trailing newline, enforce the vmparm_max length limit, reject
 * non-printable characters, convert to EBCDIC, and maintain the
 * VP-valid flag.  Returns len on success or -EINVAL.
 */
static ssize_t reipl_generic_vmparm_store(struct ipl_parameter_block *ipb,
					  size_t vmparm_max,
					  const char *buf, size_t len)
{
	int i, ip_len;

	/* ignore trailing newline */
	ip_len = len;
	if ((len > 0) && (buf[len - 1] == '\n'))
		ip_len--;
	if (ip_len > vmparm_max)
		return -EINVAL;

	/* parm is used to store kernel options, check for common chars */
	for (i = 0; i < ip_len; i++)
		if (!(isalnum(buf[i]) || isascii(buf[i]) || isprint(buf[i])))
			return -EINVAL;

	memset(ipb->ipl_info.ccw.vm_parm, 0, DIAG308_VMPARM_SIZE);
	ipb->ipl_info.ccw.vm_parm_len = ip_len;
	if (ip_len > 0) {
		ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID;
		memcpy(ipb->ipl_info.ccw.vm_parm, buf, ip_len);
		ASCEBC(ipb->ipl_info.ccw.vm_parm, ip_len);
	} else {
		ipb->ipl_info.ccw.vm_flags &= ~DIAG308_VM_FLAGS_VP_VALID;
	}

	return len;
}
/* NSS wrapper */
/* NSS re-IPL "parm" attribute: delegate to the generic vmparm helpers. */
static ssize_t reipl_nss_vmparm_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *page)
{
	return reipl_generic_vmparm_show(reipl_block_nss, page);
}

/* NSS vmparm store; NSS parms are limited to 56 characters. */
static ssize_t reipl_nss_vmparm_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t len)
{
	return reipl_generic_vmparm_store(reipl_block_nss, 56, buf, len);
}
/* CCW wrapper */
/* CCW re-IPL "parm" attribute: delegate to the generic vmparm helpers. */
static ssize_t reipl_ccw_vmparm_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *page)
{
	return reipl_generic_vmparm_show(reipl_block_ccw, page);
}

/* CCW vmparm store; CCW parms are limited to 64 characters. */
static ssize_t reipl_ccw_vmparm_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t len)
{
	return reipl_generic_vmparm_store(reipl_block_ccw, 64, buf, len);
}
static struct kobj_attribute sys_reipl_nss_vmparm_attr =
__ATTR(parm, S_IRUGO | S_IWUSR, reipl_nss_vmparm_show,
reipl_nss_vmparm_store);
static struct kobj_attribute sys_reipl_ccw_vmparm_attr =
__ATTR(parm, S_IRUGO | S_IWUSR, reipl_ccw_vmparm_show,
reipl_ccw_vmparm_store);
/* FCP reipl device attributes */
/* sysfs binary read of the SCP data in the FCP re-IPL parameter block. */
static ssize_t reipl_fcp_scpdata_read(struct file *filp, struct kobject *kobj,
				      struct bin_attribute *attr,
				      char *buf, loff_t off, size_t count)
{
	size_t size = reipl_block_fcp->ipl_info.fcp.scp_data_len;
	void *scp_data = reipl_block_fcp->ipl_info.fcp.scp_data;

	return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
/*
 * sysfs binary write of the FCP re-IPL SCP data: copy the buffer into
 * the parameter block, zero-pad it to a multiple of 8 bytes, and
 * update the block lengths.  Partial writes (off != 0) are rejected.
 *
 * NOTE(review): count is bounded by the bin_attribute size
 * (DIAG308_SCPDATA_SIZE); the padding assumes that size is a multiple
 * of 8 -- verify, otherwise up to 7 pad bytes could land past the end
 * of scp_data.
 */
static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
				       struct bin_attribute *attr,
				       char *buf, loff_t off, size_t count)
{
	size_t scpdata_len = count;
	size_t padding;

	if (off)
		return -EINVAL;

	memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf, count);
	if (scpdata_len % 8) {
		padding = 8 - (scpdata_len % 8);
		memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len,
		       0, padding);
		scpdata_len += padding;
	}

	reipl_block_fcp->ipl_info.fcp.scp_data_len = scpdata_len;
	reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN + scpdata_len;
	reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN + scpdata_len;

	return count;
}
static struct bin_attribute sys_reipl_fcp_scp_data_attr =
__BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read,
reipl_fcp_scpdata_write, DIAG308_SCPDATA_SIZE);
static struct bin_attribute *reipl_fcp_bin_attrs[] = {
&sys_reipl_fcp_scp_data_attr,
NULL,
};
DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
reipl_block_fcp->ipl_info.fcp.wwpn);
DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n",
reipl_block_fcp->ipl_info.fcp.lun);
DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
reipl_block_fcp->ipl_info.fcp.bootprog);
DEFINE_IPL_ATTR_RW(reipl_fcp, br_lba, "%lld\n", "%lld\n",
reipl_block_fcp->ipl_info.fcp.br_lba);
DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
reipl_block_fcp->ipl_info.fcp.devno);
/*
 * Extract the loadparm from an IPL parameter block as a trimmed,
 * NUL-terminated ASCII string; loadparm must hold LOADPARM_LEN + 1
 * bytes.
 */
static void reipl_get_ascii_loadparm(char *loadparm,
				     struct ipl_parameter_block *ibp)
{
	memcpy(loadparm, ibp->hdr.loadparm, LOADPARM_LEN);
	EBCASC(loadparm, LOADPARM_LEN);
	loadparm[LOADPARM_LEN] = 0;
	strim(loadparm);
}
/* Format the loadparm of ipb as an ASCII sysfs line. */
static ssize_t reipl_generic_loadparm_show(struct ipl_parameter_block *ipb,
					   char *page)
{
	char buf[LOADPARM_LEN + 1];

	reipl_get_ascii_loadparm(buf, ipb);
	return sprintf(page, "%s\n", buf);
}
/*
 * Store a new loadparm into an IPL parameter block.  A trailing
 * newline is stripped; at most LOADPARM_LEN characters from the set
 * [a-zA-Z0-9 .] are accepted and the result must not start with a
 * blank.  The value is blank-padded and stored in EBCDIC.  Returns
 * len on success or -EINVAL.
 */
static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
					    const char *buf, size_t len)
{
	int i, lp_len;

	/* ignore trailing newline */
	lp_len = len;
	if ((len > 0) && (buf[len - 1] == '\n'))
		lp_len--;
	/* loadparm can have max 8 characters and must not start with a blank */
	if ((lp_len > LOADPARM_LEN) || ((lp_len > 0) && (buf[0] == ' ')))
		return -EINVAL;
	/* loadparm can only contain "a-z,A-Z,0-9,SP,." */
	for (i = 0; i < lp_len; i++) {
		if (isalpha(buf[i]) || isdigit(buf[i]) || (buf[i] == ' ') ||
		    (buf[i] == '.'))
			continue;
		return -EINVAL;
	}
	/* initialize loadparm with blanks */
	memset(ipb->hdr.loadparm, ' ', LOADPARM_LEN);
	/* copy and convert to ebcdic */
	memcpy(ipb->hdr.loadparm, buf, lp_len);
	ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
	return len;
}
/* FCP wrapper */
/* FCP re-IPL "loadparm" attribute: delegate to the generic helpers. */
static ssize_t reipl_fcp_loadparm_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return reipl_generic_loadparm_show(reipl_block_fcp, page);
}

static ssize_t reipl_fcp_loadparm_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t len)
{
	return reipl_generic_loadparm_store(reipl_block_fcp, buf, len);
}
static struct kobj_attribute sys_reipl_fcp_loadparm_attr =
__ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_fcp_loadparm_show,
reipl_fcp_loadparm_store);
/* Plain-text attributes shown in the reipl/fcp sysfs directory. */
static struct attribute *reipl_fcp_attrs[] = {
&sys_reipl_fcp_device_attr.attr,
&sys_reipl_fcp_wwpn_attr.attr,
&sys_reipl_fcp_lun_attr.attr,
&sys_reipl_fcp_bootprog_attr.attr,
&sys_reipl_fcp_br_lba_attr.attr,
&sys_reipl_fcp_loadparm_attr.attr,
NULL,
};
/* Combined group: text attrs plus the scp_data binary attr. */
static struct attribute_group reipl_fcp_attr_group = {
.attrs = reipl_fcp_attrs,
.bin_attrs = reipl_fcp_bin_attrs,
};
/* CCW reipl device attributes */
DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ipl_info.ccw);
/* NSS wrapper */
/* sysfs show/store for reipl/nss/loadparm — generic helpers on the NSS block. */
static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return reipl_generic_loadparm_show(reipl_block_nss, page);
}
static ssize_t reipl_nss_loadparm_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
return reipl_generic_loadparm_store(reipl_block_nss, buf, len);
}
/* CCW wrapper */
/* sysfs show/store for reipl/ccw/loadparm — generic helpers on the CCW block. */
static ssize_t reipl_ccw_loadparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return reipl_generic_loadparm_show(reipl_block_ccw, page);
}
static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
return reipl_generic_loadparm_store(reipl_block_ccw, buf, len);
}
static struct kobj_attribute sys_reipl_ccw_loadparm_attr =
__ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_ccw_loadparm_show,
reipl_ccw_loadparm_store);
/* CCW re-IPL attributes under z/VM (adds the vmparm attribute). */
static struct attribute *reipl_ccw_attrs_vm[] = {
&sys_reipl_ccw_device_attr.attr,
&sys_reipl_ccw_loadparm_attr.attr,
&sys_reipl_ccw_vmparm_attr.attr,
NULL,
};
/* CCW re-IPL attributes on LPAR (no vmparm). */
static struct attribute *reipl_ccw_attrs_lpar[] = {
&sys_reipl_ccw_device_attr.attr,
&sys_reipl_ccw_loadparm_attr.attr,
NULL,
};
static struct attribute_group reipl_ccw_attr_group_vm = {
.name = IPL_CCW_STR,
.attrs = reipl_ccw_attrs_vm,
};
static struct attribute_group reipl_ccw_attr_group_lpar = {
.name = IPL_CCW_STR,
.attrs = reipl_ccw_attrs_lpar,
};
/* NSS reipl device attributes */
/*
 * Extract the NSS name from an IPL parameter block as ASCII.
 * dst must provide room for NSS_NAME_SIZE + 1 bytes.  The field is read
 * from the CCW union member, so callers should pass a CCW/NSS-shaped block.
 */
static void reipl_get_ascii_nss_name(char *dst,
struct ipl_parameter_block *ipb)
{
memcpy(dst, ipb->ipl_info.ccw.nss_name, NSS_NAME_SIZE);
EBCASC(dst, NSS_NAME_SIZE);
dst[NSS_NAME_SIZE] = 0;
}
/* sysfs show for reipl/nss/name: print the configured NSS name in ASCII. */
static ssize_t reipl_nss_name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
char nss_name[NSS_NAME_SIZE + 1] = {};
reipl_get_ascii_nss_name(nss_name, reipl_block_nss);
return sprintf(page, "%s\n", nss_name);
}
/*
 * sysfs store for reipl/nss/name.  Writes the name blank-padded and in
 * upper-case EBCDIC into the NSS re-IPL block; an empty name clears the
 * NSS-valid flag.  Returns the bytes consumed or -EINVAL if too long.
 */
static ssize_t reipl_nss_name_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t len)
{
	size_t name_len = len;

	/* drop a single trailing newline */
	if (name_len && buf[name_len - 1] == '\n')
		name_len--;
	if (name_len > NSS_NAME_SIZE)
		return -EINVAL;
	/* blank-fill (EBCDIC 0x40) the whole name field first */
	memset(reipl_block_nss->ipl_info.ccw.nss_name, 0x40, NSS_NAME_SIZE);
	if (!name_len) {
		/* empty input deselects the NSS */
		reipl_block_nss->ipl_info.ccw.vm_flags &=
			~DIAG308_VM_FLAGS_NSS_VALID;
		return len;
	}
	reipl_block_nss->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_NSS_VALID;
	memcpy(reipl_block_nss->ipl_info.ccw.nss_name, buf, name_len);
	ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, name_len);
	EBC_TOUPPER(reipl_block_nss->ipl_info.ccw.nss_name, name_len);
	return len;
}
/* reipl/nss sysfs attributes: NSS name, loadparm and vmparm. */
static struct kobj_attribute sys_reipl_nss_name_attr =
__ATTR(name, S_IRUGO | S_IWUSR, reipl_nss_name_show,
reipl_nss_name_store);
static struct kobj_attribute sys_reipl_nss_loadparm_attr =
__ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_nss_loadparm_show,
reipl_nss_loadparm_store);
static struct attribute *reipl_nss_attrs[] = {
&sys_reipl_nss_name_attr.attr,
&sys_reipl_nss_loadparm_attr.attr,
&sys_reipl_nss_vmparm_attr.attr,
NULL,
};
static struct attribute_group reipl_nss_attr_group = {
.name = IPL_NSS_STR,
.attrs = reipl_nss_attrs,
};
/*
 * Remember which parameter block will be used for re-IPL and publish it
 * in the OS info area (read e.g. by a kdump kernel).
 */
static void set_reipl_block_actual(struct ipl_parameter_block *reipl_block)
{
reipl_block_actual = reipl_block;
os_info_entry_add(OS_INFO_REIPL_BLOCK, reipl_block_actual,
reipl_block->hdr.len);
}
/* reipl type */
/*
 * Select the re-IPL type and derive the concrete re-IPL method from the
 * machine environment (diag 308 availability, z/VM vs. LPAR).  Rejects
 * types that were not announced in reipl_capabilities.  Returns 0 or
 * -EINVAL.
 */
static int reipl_set_type(enum ipl_type type)
{
if (!(reipl_capabilities & type))
return -EINVAL;
switch(type) {
case IPL_TYPE_CCW:
/* prefer diag 308; otherwise CP IPL under z/VM or CIO reset */
if (diag308_set_works)
reipl_method = REIPL_METHOD_CCW_DIAG;
else if (MACHINE_IS_VM)
reipl_method = REIPL_METHOD_CCW_VM;
else
reipl_method = REIPL_METHOD_CCW_CIO;
set_reipl_block_actual(reipl_block_ccw);
break;
case IPL_TYPE_FCP:
if (diag308_set_works)
reipl_method = REIPL_METHOD_FCP_RW_DIAG;
else if (MACHINE_IS_VM)
reipl_method = REIPL_METHOD_FCP_RO_VM;
else
reipl_method = REIPL_METHOD_FCP_RO_DIAG;
set_reipl_block_actual(reipl_block_fcp);
break;
case IPL_TYPE_FCP_DUMP:
reipl_method = REIPL_METHOD_FCP_DUMP;
break;
case IPL_TYPE_NSS:
if (diag308_set_works)
reipl_method = REIPL_METHOD_NSS_DIAG;
else
reipl_method = REIPL_METHOD_NSS;
set_reipl_block_actual(reipl_block_nss);
break;
case IPL_TYPE_UNKNOWN:
reipl_method = REIPL_METHOD_DEFAULT;
break;
default:
BUG();
}
reipl_type = type;
return 0;
}
/* sysfs show for firmware/reipl/reipl_type. */
static ssize_t reipl_type_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%s\n", ipl_type_str(reipl_type));
}
/*
 * sysfs store for firmware/reipl/reipl_type.  Matches the input by
 * prefix against "ccw"/"fcp"/"nss"; returns len on success or the
 * error from reipl_set_type (-EINVAL for unknown strings).
 */
static ssize_t reipl_type_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
int rc = -EINVAL;
if (strncmp(buf, IPL_CCW_STR, strlen(IPL_CCW_STR)) == 0)
rc = reipl_set_type(IPL_TYPE_CCW);
else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0)
rc = reipl_set_type(IPL_TYPE_FCP);
else if (strncmp(buf, IPL_NSS_STR, strlen(IPL_NSS_STR)) == 0)
rc = reipl_set_type(IPL_TYPE_NSS);
return (rc != 0) ? rc : len;
}
/* /sys/firmware/reipl/reipl_type attribute and the re-IPL ksets. */
static struct kobj_attribute reipl_type_attr =
__ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
static struct kset *reipl_kset;
static struct kset *reipl_fcp_kset;
/*
 * Build the CP "IPL ..." command string for z/VM based re-IPL methods.
 * dst must be large enough for the worst case (callers use 128 bytes).
 * NOTE(review): reipl_get_ascii_nss_name reads the CCW union member, so
 * the nss_name extraction is only meaningful for CCW/NSS blocks; for
 * other methods the result is simply unused here.
 */
static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
const enum ipl_method m)
{
char loadparm[LOADPARM_LEN + 1] = {};
char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
char nss_name[NSS_NAME_SIZE + 1] = {};
size_t pos = 0;
reipl_get_ascii_loadparm(loadparm, ipb);
reipl_get_ascii_nss_name(nss_name, ipb);
reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
switch (m) {
case REIPL_METHOD_CCW_VM:
pos = sprintf(dst, "IPL %X CLEAR", ipb->ipl_info.ccw.devno);
break;
case REIPL_METHOD_NSS:
pos = sprintf(dst, "IPL %s", nss_name);
break;
default:
break;
}
/* append optional LOADPARM and PARM clauses */
if (strlen(loadparm) > 0)
pos += sprintf(dst + pos, " LOADPARM '%s'", loadparm);
if (strlen(vmparm) > 0)
sprintf(dst + pos, " PARM %s", vmparm);
}
/*
 * Perform the actual re-IPL according to the previously selected
 * reipl_method.  Runs on the IPL CPU (see reipl_run).  On success the
 * machine re-IPLs and this function never returns; the trailing
 * disabled_wait is only reached if the re-IPL attempt failed.
 */
static void __reipl_run(void *unused)
{
struct ccw_dev_id devid;
static char buf[128];
switch (reipl_method) {
case REIPL_METHOD_CCW_CIO:
/* re-IPL via channel subsystem reset of the boot device */
devid.ssid = reipl_block_ccw->ipl_info.ccw.ssid;
devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
reipl_ccw_dev(&devid);
break;
case REIPL_METHOD_CCW_VM:
/* re-IPL via CP "IPL <devno> CLEAR" command */
get_ipl_string(buf, reipl_block_ccw, REIPL_METHOD_CCW_VM);
__cpcmd(buf, NULL, 0, NULL);
break;
case REIPL_METHOD_CCW_DIAG:
/* set the parameter block, then trigger diag 308 IPL */
diag308(DIAG308_SET, reipl_block_ccw);
diag308(DIAG308_IPL, NULL);
break;
case REIPL_METHOD_FCP_RW_DIAG:
diag308(DIAG308_SET, reipl_block_fcp);
diag308(DIAG308_IPL, NULL);
break;
case REIPL_METHOD_FCP_RO_DIAG:
/* read-only: reuse the parameters already set by firmware */
diag308(DIAG308_IPL, NULL);
break;
case REIPL_METHOD_FCP_RO_VM:
__cpcmd("IPL", NULL, 0, NULL);
break;
case REIPL_METHOD_NSS_DIAG:
diag308(DIAG308_SET, reipl_block_nss);
diag308(DIAG308_IPL, NULL);
break;
case REIPL_METHOD_NSS:
get_ipl_string(buf, reipl_block_nss, REIPL_METHOD_NSS);
__cpcmd(buf, NULL, 0, NULL);
break;
case REIPL_METHOD_DEFAULT:
if (MACHINE_IS_VM)
__cpcmd("IPL", NULL, 0, NULL);
diag308(DIAG308_IPL, NULL);
break;
case REIPL_METHOD_FCP_DUMP:
break;
}
/* only reached if the re-IPL failed: stop this CPU */
disabled_wait((unsigned long) __builtin_return_address(0));
}
/* Shutdown action entry point: run the re-IPL on the IPL CPU. */
static void reipl_run(struct shutdown_trigger *trigger)
{
smp_call_ipl_cpu(__reipl_run, NULL);
}
/* Initialize the common header of a CCW-type IPL parameter block. */
static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
{
ipb->hdr.len = IPL_PARM_BLK_CCW_LEN;
ipb->hdr.version = IPL_PARM_BLOCK_VERSION;
ipb->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
ipb->hdr.pbt = DIAG308_IPL_TYPE_CCW;
}
/*
 * Seed a CCW-type block with the current boot's LOADPARM (from SCLP)
 * and, under z/VM with working diag 308, the saved VM PARM string.
 */
static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb)
{
/* LOADPARM */
/* check if read scp info worked and set loadparm */
if (sclp_ipl_info.is_valid)
memcpy(ipb->hdr.loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN);
else
/* read scp info failed: set empty loadparm (EBCDIC blanks) */
memset(ipb->hdr.loadparm, 0x40, LOADPARM_LEN);
ipb->hdr.flags = DIAG308_FLAGS_LP_VALID;
/* VM PARM */
if (MACHINE_IS_VM && diag308_set_works &&
(ipl_block.ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID)) {
ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID;
ipb->ipl_info.ccw.vm_parm_len =
ipl_block.ipl_info.ccw.vm_parm_len;
memcpy(ipb->ipl_info.ccw.vm_parm,
ipl_block.ipl_info.ccw.vm_parm, DIAG308_VMPARM_SIZE);
}
}
/*
 * Set up the NSS re-IPL configuration (z/VM named saved systems):
 * allocate the parameter block, register the sysfs attribute group and,
 * when the system was booted from an NSS, seed the block from the
 * current boot.  Returns 0 (also when not running under z/VM) or a
 * negative error code.
 */
static int __init reipl_nss_init(void)
{
	int rc;

	if (!MACHINE_IS_VM)
		return 0;
	reipl_block_nss = (void *) get_zeroed_page(GFP_KERNEL);
	if (!reipl_block_nss)
		return -ENOMEM;
	/* without diag 308 the vmparm attribute is read-only */
	if (!diag308_set_works)
		sys_reipl_nss_vmparm_attr.attr.mode = S_IRUGO;
	rc = sysfs_create_group(&reipl_kset->kobj, &reipl_nss_attr_group);
	if (rc) {
		/* fix: don't leak the parameter block page on failure
		 * (matches the error handling in reipl_fcp_init) */
		free_page((unsigned long) reipl_block_nss);
		reipl_block_nss = NULL;
		return rc;
	}
	reipl_block_ccw_init(reipl_block_nss);
	if (ipl_info.type == IPL_TYPE_NSS) {
		/* copy the kernel NSS name, blank-padded and in EBCDIC */
		memset(reipl_block_nss->ipl_info.ccw.nss_name,
		       ' ', NSS_NAME_SIZE);
		memcpy(reipl_block_nss->ipl_info.ccw.nss_name,
		       kernel_nss_name, strlen(kernel_nss_name));
		ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, NSS_NAME_SIZE);
		reipl_block_nss->ipl_info.ccw.vm_flags |=
			DIAG308_VM_FLAGS_NSS_VALID;
		reipl_block_ccw_fill_parms(reipl_block_nss);
	}
	reipl_capabilities |= IPL_TYPE_NSS;
	return 0;
}
/*
 * Set up the CCW re-IPL configuration: allocate the parameter block,
 * register the environment-specific sysfs group (z/VM vs. LPAR) and
 * seed the block from the current boot when it was a CCW IPL.
 * Returns 0 or a negative error code.
 */
static int __init reipl_ccw_init(void)
{
	int rc;

	reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
	if (!reipl_block_ccw)
		return -ENOMEM;
	if (MACHINE_IS_VM) {
		/* without diag 308 the vmparm attribute is read-only */
		if (!diag308_set_works)
			sys_reipl_ccw_vmparm_attr.attr.mode = S_IRUGO;
		rc = sysfs_create_group(&reipl_kset->kobj,
					&reipl_ccw_attr_group_vm);
	} else {
		/* without diag 308 the loadparm attribute is read-only */
		if (!diag308_set_works)
			sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
		rc = sysfs_create_group(&reipl_kset->kobj,
					&reipl_ccw_attr_group_lpar);
	}
	if (rc) {
		/* fix: don't leak the parameter block page on failure
		 * (matches the error handling in reipl_fcp_init) */
		free_page((unsigned long) reipl_block_ccw);
		reipl_block_ccw = NULL;
		return rc;
	}
	reipl_block_ccw_init(reipl_block_ccw);
	if (ipl_info.type == IPL_TYPE_CCW) {
		reipl_block_ccw->ipl_info.ccw.ssid = ipl_ssid;
		reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
		reipl_block_ccw_fill_parms(reipl_block_ccw);
	}
	reipl_capabilities |= IPL_TYPE_CCW;
	return 0;
}
/*
 * Set up the FCP (SCSI) re-IPL configuration: allocate the parameter
 * block, create a dedicated kset (needed to mix a plain attribute group
 * with binary attributes) and seed the block either from the current
 * FCP boot or with defaults.  Returns 0 or a negative error code.
 */
static int __init reipl_fcp_init(void)
{
int rc;
if (!diag308_set_works) {
/* without diag 308 FCP re-IPL is read-only, and only
 * meaningful if the system actually booted from FCP */
if (ipl_info.type == IPL_TYPE_FCP) {
make_attrs_ro(reipl_fcp_attrs);
sys_reipl_fcp_scp_data_attr.attr.mode = S_IRUGO;
} else
return 0;
}
reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
if (!reipl_block_fcp)
return -ENOMEM;
/* sysfs: create fcp kset for mixing attr group and bin attrs */
reipl_fcp_kset = kset_create_and_add(IPL_FCP_STR, NULL,
&reipl_kset->kobj);
if (!reipl_fcp_kset) {
free_page((unsigned long) reipl_block_fcp);
return -ENOMEM;
}
rc = sysfs_create_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
if (rc) {
kset_unregister(reipl_fcp_kset);
free_page((unsigned long) reipl_block_fcp);
return rc;
}
if (ipl_info.type == IPL_TYPE_FCP) {
memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
/*
 * Fix loadparm: There are systems where the (SCSI) LOADPARM
 * is invalid in the SCSI IPL parameter block, so take it
 * always from sclp_ipl_info.
 */
memcpy(reipl_block_fcp->hdr.loadparm, sclp_ipl_info.loadparm,
LOADPARM_LEN);
} else {
/* not booted from FCP: fill in a default FCP block header */
reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
reipl_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP;
reipl_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_IPL;
}
reipl_capabilities |= IPL_TYPE_FCP;
return 0;
}
/*
 * Choose the initial re-IPL type.  Defaults to the type of the current
 * boot, but a re-IPL block preserved in the OS info area (e.g. by a
 * kdump/crash kernel) takes precedence.
 */
static int __init reipl_type_init(void)
{
	enum ipl_type reipl_type = ipl_info.type;
	struct ipl_parameter_block *reipl_block;
	unsigned long size;

	reipl_block = os_info_old_entry(OS_INFO_REIPL_BLOCK, &size);
	if (!reipl_block)
		goto out;
	/*
	 * If we have an OS info reipl block, this will be used
	 */
	if (reipl_block->hdr.pbt == DIAG308_IPL_TYPE_FCP) {
		/* fix: reipl_block_fcp may not have been allocated
		 * (reipl_fcp_init can return early) — guard the copy */
		if (reipl_block_fcp) {
			memcpy(reipl_block_fcp, reipl_block, size);
			reipl_type = IPL_TYPE_FCP;
		}
	} else if (reipl_block->hdr.pbt == DIAG308_IPL_TYPE_CCW) {
		if (reipl_block_ccw) {
			memcpy(reipl_block_ccw, reipl_block, size);
			reipl_type = IPL_TYPE_CCW;
		}
	}
out:
	return reipl_set_type(reipl_type);
}
/*
 * Create /sys/firmware/reipl and initialize all re-IPL variants.
 * NOTE(review): if one of the sub-initializers fails, the kset and the
 * already-created groups are not torn down — acceptable for boot-time
 * init, but worth confirming against upstream behavior.
 */
static int __init reipl_init(void)
{
int rc;
reipl_kset = kset_create_and_add("reipl", NULL, firmware_kobj);
if (!reipl_kset)
return -ENOMEM;
rc = sysfs_create_file(&reipl_kset->kobj, &reipl_type_attr.attr);
if (rc) {
kset_unregister(reipl_kset);
return rc;
}
rc = reipl_ccw_init();
if (rc)
return rc;
rc = reipl_fcp_init();
if (rc)
return rc;
rc = reipl_nss_init();
if (rc)
return rc;
return reipl_type_init();
}
/* "reipl" shutdown action: re-IPL Linux on shutdown. */
static struct shutdown_action __refdata reipl_action = {
.name = SHUTDOWN_ACTION_REIPL_STR,
.fn = reipl_run,
.init = reipl_init,
};
/*
 * dump shutdown action: Dump Linux on shutdown.
 */
/* FCP dump device attributes */
DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n",
dump_block_fcp->ipl_info.fcp.wwpn);
DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n",
dump_block_fcp->ipl_info.fcp.lun);
DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
dump_block_fcp->ipl_info.fcp.bootprog);
DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
dump_block_fcp->ipl_info.fcp.br_lba);
DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
dump_block_fcp->ipl_info.fcp.devno);
/* sysfs group for dump/fcp. */
static struct attribute *dump_fcp_attrs[] = {
&sys_dump_fcp_device_attr.attr,
&sys_dump_fcp_wwpn_attr.attr,
&sys_dump_fcp_lun_attr.attr,
&sys_dump_fcp_bootprog_attr.attr,
&sys_dump_fcp_br_lba_attr.attr,
NULL,
};
static struct attribute_group dump_fcp_attr_group = {
.name = IPL_FCP_STR,
.attrs = dump_fcp_attrs,
};
/* CCW dump device attributes */
DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ipl_info.ccw);
/* sysfs group for dump/ccw. */
static struct attribute *dump_ccw_attrs[] = {
&sys_dump_ccw_device_attr.attr,
NULL,
};
static struct attribute_group dump_ccw_attr_group = {
.name = IPL_CCW_STR,
.attrs = dump_ccw_attrs,
};
/* dump type */
/*
 * Select the dump type and derive the dump method from the machine
 * environment.  Unknown/NONE types map to DUMP_METHOD_NONE.  Returns 0
 * or -EINVAL if the type is not in dump_capabilities.
 */
static int dump_set_type(enum dump_type type)
{
if (!(dump_capabilities & type))
return -EINVAL;
switch (type) {
case DUMP_TYPE_CCW:
if (diag308_set_works)
dump_method = DUMP_METHOD_CCW_DIAG;
else if (MACHINE_IS_VM)
dump_method = DUMP_METHOD_CCW_VM;
else
dump_method = DUMP_METHOD_CCW_CIO;
break;
case DUMP_TYPE_FCP:
dump_method = DUMP_METHOD_FCP_DIAG;
break;
default:
dump_method = DUMP_METHOD_NONE;
}
dump_type = type;
return 0;
}
/* sysfs show for firmware/dump/dump_type. */
static ssize_t dump_type_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%s\n", dump_type_str(dump_type));
}
/* sysfs store for firmware/dump/dump_type: prefix match none/ccw/fcp. */
static ssize_t dump_type_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
int rc = -EINVAL;
if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0)
rc = dump_set_type(DUMP_TYPE_NONE);
else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0)
rc = dump_set_type(DUMP_TYPE_CCW);
else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0)
rc = dump_set_type(DUMP_TYPE_FCP);
return (rc != 0) ? rc : len;
}
static struct kobj_attribute dump_type_attr =
__ATTR(dump_type, 0644, dump_type_show, dump_type_store);
static struct kset *dump_kset;
/*
 * Start a dump via diag 308.  Return code 0x302 indicates the request
 * must be retried; wait one second between attempts.
 */
static void diag308_dump(void *dump_block)
{
diag308(DIAG308_SET, dump_block);
while (1) {
if (diag308(DIAG308_DUMP, NULL) != 0x302)
break;
udelay_simple(USEC_PER_SEC);
}
}
/*
 * Trigger the stand-alone dump according to the selected dump_method.
 * Runs on the IPL CPU after all other CPUs were stopped (see dump_run).
 */
static void __dump_run(void *unused)
{
struct ccw_dev_id devid;
static char buf[100];
switch (dump_method) {
case DUMP_METHOD_CCW_CIO:
/* IPL the dump device via channel subsystem reset */
devid.ssid = dump_block_ccw->ipl_info.ccw.ssid;
devid.devno = dump_block_ccw->ipl_info.ccw.devno;
reipl_ccw_dev(&devid);
break;
case DUMP_METHOD_CCW_VM:
/* save CPU status first, then IPL the dump device via CP */
sprintf(buf, "STORE STATUS");
__cpcmd(buf, NULL, 0, NULL);
sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
__cpcmd(buf, NULL, 0, NULL);
break;
case DUMP_METHOD_CCW_DIAG:
diag308_dump(dump_block_ccw);
break;
case DUMP_METHOD_FCP_DIAG:
diag308_dump(dump_block_fcp);
break;
default:
break;
}
}
/* Shutdown action entry point: stop all CPUs and dump on the IPL CPU. */
static void dump_run(struct shutdown_trigger *trigger)
{
if (dump_method == DUMP_METHOD_NONE)
return;
smp_send_stop();
smp_call_ipl_cpu(__dump_run, NULL);
}
/*
 * Set up the CCW dump configuration: allocate the parameter block,
 * register the sysfs group and fill in the block header.
 * Returns 0 or a negative error code.
 */
static int __init dump_ccw_init(void)
{
int rc;
dump_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
if (!dump_block_ccw)
return -ENOMEM;
rc = sysfs_create_group(&dump_kset->kobj, &dump_ccw_attr_group);
if (rc) {
free_page((unsigned long)dump_block_ccw);
return rc;
}
dump_block_ccw->hdr.len = IPL_PARM_BLK_CCW_LEN;
dump_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
dump_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
dump_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
dump_capabilities |= DUMP_TYPE_CCW;
return 0;
}
/*
 * Set up the FCP dump configuration.  Requires both the SCLP-reported
 * dump capability and a working diag 308; otherwise FCP dump is simply
 * not offered (return 0).  Returns a negative error code on failure.
 */
static int __init dump_fcp_init(void)
{
int rc;
if (!sclp_ipl_info.has_dump)
return 0; /* LDIPL DUMP is not installed */
if (!diag308_set_works)
return 0;
dump_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
if (!dump_block_fcp)
return -ENOMEM;
rc = sysfs_create_group(&dump_kset->kobj, &dump_fcp_attr_group);
if (rc) {
free_page((unsigned long)dump_block_fcp);
return rc;
}
dump_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
dump_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
dump_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
dump_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP;
dump_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_DUMP;
dump_capabilities |= DUMP_TYPE_FCP;
return 0;
}
/*
 * Create /sys/firmware/dump, initialize CCW and FCP dump variants and
 * default the dump type to "none".
 */
static int __init dump_init(void)
{
int rc;
dump_kset = kset_create_and_add("dump", NULL, firmware_kobj);
if (!dump_kset)
return -ENOMEM;
rc = sysfs_create_file(&dump_kset->kobj, &dump_type_attr.attr);
if (rc) {
kset_unregister(dump_kset);
return rc;
}
rc = dump_ccw_init();
if (rc)
return rc;
rc = dump_fcp_init();
if (rc)
return rc;
dump_set_type(DUMP_TYPE_NONE);
return 0;
}
/* "dump" shutdown action: dump Linux on shutdown. */
static struct shutdown_action __refdata dump_action = {
.name = SHUTDOWN_ACTION_DUMP_STR,
.fn = dump_run,
.init = dump_init,
};
/*
 * "dump_reipl" action: publish the selected re-IPL block (pointer and
 * checksum) in absolute lowcore so the dump tool can re-IPL afterwards,
 * then perform the dump.
 */
static void dump_reipl_run(struct shutdown_trigger *trigger)
{
unsigned long ipib = (unsigned long) reipl_block_actual;
unsigned int csum;
csum = csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
mem_assign_absolute(S390_lowcore.ipib, ipib);
mem_assign_absolute(S390_lowcore.ipib_checksum, csum);
dump_run(trigger);
}
/* dump+reipl is only possible with a working diag 308 interface. */
static int __init dump_reipl_init(void)
{
	return diag308_set_works ? 0 : -EOPNOTSUPP;
}
/* "dump_reipl" shutdown action: dump, then re-IPL via stored IPIB. */
static struct shutdown_action __refdata dump_reipl_action = {
.name = SHUTDOWN_ACTION_DUMP_REIPL_STR,
.fn = dump_reipl_run,
.init = dump_reipl_init,
};
/*
 * vmcmd shutdown action: Trigger vm command on shutdown.
 */
/* One CP command string per shutdown trigger, set via sysfs or cmdline. */
static char vmcmd_on_reboot[128];
static char vmcmd_on_panic[128];
static char vmcmd_on_halt[128];
static char vmcmd_on_poff[128];
static char vmcmd_on_restart[128];
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_reboot, "%s\n", "%s\n", vmcmd_on_reboot);
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_panic, "%s\n", "%s\n", vmcmd_on_panic);
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_halt, "%s\n", "%s\n", vmcmd_on_halt);
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_poff, "%s\n", "%s\n", vmcmd_on_poff);
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_restart, "%s\n", "%s\n", vmcmd_on_restart);
static struct attribute *vmcmd_attrs[] = {
&sys_vmcmd_on_reboot_attr.attr,
&sys_vmcmd_on_panic_attr.attr,
&sys_vmcmd_on_halt_attr.attr,
&sys_vmcmd_on_poff_attr.attr,
&sys_vmcmd_on_restart_attr.attr,
NULL,
};
static struct attribute_group vmcmd_attr_group = {
.attrs = vmcmd_attrs,
};
static struct kset *vmcmd_kset;
/*
 * "vmcmd" shutdown action: issue the CP command configured for the
 * firing trigger.  Does nothing for unknown triggers or when no command
 * was configured.
 */
static void vmcmd_run(struct shutdown_trigger *trigger)
{
	/* map each trigger name to its configured CP command buffer */
	static const struct {
		const char *name;
		char *cmd;
	} vmcmd_map[] = {
		{ ON_REIPL_STR,   vmcmd_on_reboot  },
		{ ON_PANIC_STR,   vmcmd_on_panic   },
		{ ON_HALT_STR,    vmcmd_on_halt    },
		{ ON_POFF_STR,    vmcmd_on_poff    },
		{ ON_RESTART_STR, vmcmd_on_restart },
	};
	unsigned int i;

	for (i = 0; i < sizeof(vmcmd_map) / sizeof(vmcmd_map[0]); i++) {
		if (strcmp(trigger->name, vmcmd_map[i].name) != 0)
			continue;
		/* nothing configured for this trigger */
		if (vmcmd_map[i].cmd[0] == '\0')
			return;
		__cpcmd(vmcmd_map[i].cmd, NULL, 0, NULL);
		return;
	}
}
/*
 * Create /sys/firmware/vmcmd.  Only available under z/VM; otherwise the
 * action reports -EOPNOTSUPP and cannot be selected.
 */
static int vmcmd_init(void)
{
if (!MACHINE_IS_VM)
return -EOPNOTSUPP;
vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj);
if (!vmcmd_kset)
return -ENOMEM;
return sysfs_create_group(&vmcmd_kset->kobj, &vmcmd_attr_group);
}
/* "vmcmd" shutdown action descriptor. */
static struct shutdown_action vmcmd_action = {SHUTDOWN_ACTION_VMCMD_STR,
vmcmd_run, vmcmd_init};
/*
 * stop shutdown action: Stop Linux on shutdown.
 */
/*
 * "stop" action: for panic/restart triggers the current CPU enters
 * disabled wait (it must not go away); otherwise it is simply stopped.
 */
static void stop_run(struct shutdown_trigger *trigger)
{
if (strcmp(trigger->name, ON_PANIC_STR) == 0 ||
strcmp(trigger->name, ON_RESTART_STR) == 0)
disabled_wait((unsigned long) __builtin_return_address(0));
smp_stop_cpu();
}
static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR,
stop_run, NULL};
/* action list */
static struct shutdown_action *shutdown_actions_list[] = {
&ipl_action, &reipl_action, &dump_reipl_action, &dump_action,
&vmcmd_action, &stop_action};
#define SHUTDOWN_ACTIONS_COUNT (sizeof(shutdown_actions_list) / sizeof(void *))
/*
 * Trigger section
 */
static struct kset *shutdown_actions_kset;
/*
 * Bind a shutdown action (selected by name in buf) to a trigger.
 * Actions whose init failed report their init error instead of being
 * selectable.  Returns len on success or a negative error code.
 */
static int set_trigger(const char *buf, struct shutdown_trigger *trigger,
		       size_t len)
{
	struct shutdown_action *action;
	int i;

	for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) {
		action = shutdown_actions_list[i];
		if (!sysfs_streq(buf, action->name))
			continue;
		/* an action that failed to initialize cannot be chosen */
		if (action->init_rc)
			return action->init_rc;
		trigger->action = action;
		return len;
	}
	return -EINVAL;
}
/* on reipl */
/* "on_reboot" trigger: defaults to the re-IPL action. */
static struct shutdown_trigger on_reboot_trigger = {ON_REIPL_STR,
&reipl_action};
static ssize_t on_reboot_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%s\n", on_reboot_trigger.action->name);
}
static ssize_t on_reboot_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
return set_trigger(buf, &on_reboot_trigger, len);
}
static struct kobj_attribute on_reboot_attr = __ATTR_RW(on_reboot);
/*
 * Machine restart entry point: run the configured action, then fall
 * back to a plain re-IPL in case the action returned (e.g. vmcmd).
 */
static void do_machine_restart(char *__unused)
{
smp_send_stop();
on_reboot_trigger.action->fn(&on_reboot_trigger);
reipl_run(NULL);
}
void (*_machine_restart)(char *command) = do_machine_restart;
/* on panic */
/* "on_panic" trigger: defaults to the stop action. */
static struct shutdown_trigger on_panic_trigger = {ON_PANIC_STR, &stop_action};
static ssize_t on_panic_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%s\n", on_panic_trigger.action->name);
}
static ssize_t on_panic_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
return set_trigger(buf, &on_panic_trigger, len);
}
static struct kobj_attribute on_panic_attr = __ATTR_RW(on_panic);
/* Panic handler: log LGR info, run the action, then stop as fallback. */
static void do_panic(void)
{
lgr_info_log();
on_panic_trigger.action->fn(&on_panic_trigger);
stop_run(&on_panic_trigger);
}
/* on restart */
/* "on_restart" trigger (PSW restart): defaults to the stop action. */
static struct shutdown_trigger on_restart_trigger = {ON_RESTART_STR,
&stop_action};
static ssize_t on_restart_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%s\n", on_restart_trigger.action->name);
}
static ssize_t on_restart_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
return set_trigger(buf, &on_restart_trigger, len);
}
static struct kobj_attribute on_restart_attr = __ATTR_RW(on_restart);
/*
 * PSW-restart worker: runs on an online CPU with DAT re-enabled.
 * Gives kdump a chance first, then runs the configured action and
 * finally stops as fallback.
 */
static void __do_restart(void *ignore)
{
__arch_local_irq_stosm(0x04); /* enable DAT */
smp_send_stop();
#ifdef CONFIG_CRASH_DUMP
crash_kexec(NULL);
#endif
on_restart_trigger.action->fn(&on_restart_trigger);
stop_run(&on_restart_trigger);
}
/* Entry point for PSW restart: switch to an online CPU and restart. */
void do_restart(void)
{
tracing_off();
debug_locks_off();
lgr_info_log();
smp_call_online_cpu(__do_restart, NULL);
}
/* on halt */
/* "on_halt" trigger: defaults to the stop action. */
static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action};
static ssize_t on_halt_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%s\n", on_halt_trigger.action->name);
}
static ssize_t on_halt_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
return set_trigger(buf, &on_halt_trigger, len);
}
static struct kobj_attribute on_halt_attr = __ATTR_RW(on_halt);
/* Machine halt entry point: run the action, then stop as fallback. */
static void do_machine_halt(void)
{
smp_send_stop();
on_halt_trigger.action->fn(&on_halt_trigger);
stop_run(&on_halt_trigger);
}
/* on power off */
static struct shutdown_trigger on_poff_trigger = {ON_POFF_STR, &stop_action};
static ssize_t on_poff_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%s\n", on_poff_trigger.action->name);
}
static ssize_t on_poff_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
return set_trigger(buf, &on_poff_trigger, len);
}
static struct kobj_attribute on_poff_attr = __ATTR_RW(on_poff);
static void do_machine_power_off(void)
{
smp_send_stop();
on_poff_trigger.action->fn(&on_poff_trigger);
stop_run(&on_poff_trigger);
}
void (*_machine_power_off)(void) = do_machine_power_off;
static struct attribute *shutdown_action_attrs[] = {
&on_restart_attr.attr,
&on_reboot_attr.attr,
&on_panic_attr.attr,
&on_halt_attr.attr,
&on_poff_attr.attr,
NULL,
};
static struct attribute_group shutdown_action_attr_group = {
.attrs = shutdown_action_attrs,
};
/*
 * Create /sys/firmware/shutdown_actions with one attribute per trigger.
 * Failure here is fatal: without the trigger interface the shutdown
 * configuration would be unusable, hence panic().
 */
static void __init shutdown_triggers_init(void)
{
shutdown_actions_kset = kset_create_and_add("shutdown_actions", NULL,
firmware_kobj);
if (!shutdown_actions_kset)
goto fail;
if (sysfs_create_group(&shutdown_actions_kset->kobj,
&shutdown_action_attr_group))
goto fail;
return;
fail:
panic("shutdown_triggers_init failed\n");
}
/*
 * Run the init hook of every shutdown action and remember its result;
 * set_trigger() later refuses actions whose init_rc is non-zero.
 */
static void __init shutdown_actions_init(void)
{
int i;
for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) {
if (!shutdown_actions_list[i]->init)
continue;
shutdown_actions_list[i]->init_rc =
shutdown_actions_list[i]->init();
}
}
/*
 * Late IPL initialization: fetch SCLP IPL info, repair the LOADPARM if
 * necessary and set up shutdown actions and triggers.
 */
static int __init s390_ipl_init(void)
{
/* eight EBCDIC blanks (0x40) — an "empty" LOADPARM */
char str[8] = {0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40};
sclp_get_ipl_info(&sclp_ipl_info);
/*
 * Fix loadparm: There are systems where the (SCSI) LOADPARM
 * returned by read SCP info is invalid (contains EBCDIC blanks)
 * when the system has been booted via diag308. In that case we use
 * the value from diag308, if available.
 *
 * There are also systems where diag308 store does not work in
 * case the system is booted from HMC. Fortunately in this case
 * READ SCP info provides the correct value.
 */
if (memcmp(sclp_ipl_info.loadparm, str, sizeof(str)) == 0 &&
diag308_set_works)
memcpy(sclp_ipl_info.loadparm, ipl_block.hdr.loadparm,
LOADPARM_LEN);
shutdown_actions_init();
shutdown_triggers_init();
return 0;
}
__initcall(s390_ipl_init);
/*
 * Copy at most n characters from src to dst, skipping '"' characters.
 * dst must provide room for n + 1 bytes; the result is always
 * NUL-terminated.
 */
static void __init strncpy_skip_quote(char *dst, char *src, int n)
{
	int sx, dx;

	dx = 0;
	for (sx = 0; src[sx] != 0; sx++) {
		if (src[sx] == '"')
			continue;
		dst[dx++] = src[sx];
		if (dx >= n)
			break;
	}
	/* fix: terminate here instead of relying on every caller to do it */
	dst[dx] = 0;
}
/*
 * Kernel command line handlers "vmreboot="/"vmpanic="/"vmhalt="/
 * "vmpoff=": store the (unquoted) CP command — 127 chars max, the
 * buffers are 128 bytes — and switch the trigger to the vmcmd action.
 * Silently ignored when not running under z/VM.
 */
static int __init vmcmd_on_reboot_setup(char *str)
{
if (!MACHINE_IS_VM)
return 1;
strncpy_skip_quote(vmcmd_on_reboot, str, 127);
vmcmd_on_reboot[127] = 0;
on_reboot_trigger.action = &vmcmd_action;
return 1;
}
__setup("vmreboot=", vmcmd_on_reboot_setup);
static int __init vmcmd_on_panic_setup(char *str)
{
if (!MACHINE_IS_VM)
return 1;
strncpy_skip_quote(vmcmd_on_panic, str, 127);
vmcmd_on_panic[127] = 0;
on_panic_trigger.action = &vmcmd_action;
return 1;
}
__setup("vmpanic=", vmcmd_on_panic_setup);
static int __init vmcmd_on_halt_setup(char *str)
{
if (!MACHINE_IS_VM)
return 1;
strncpy_skip_quote(vmcmd_on_halt, str, 127);
vmcmd_on_halt[127] = 0;
on_halt_trigger.action = &vmcmd_action;
return 1;
}
__setup("vmhalt=", vmcmd_on_halt_setup);
static int __init vmcmd_on_poff_setup(char *str)
{
if (!MACHINE_IS_VM)
return 1;
strncpy_skip_quote(vmcmd_on_poff, str, 127);
vmcmd_on_poff[127] = 0;
on_poff_trigger.action = &vmcmd_action;
return 1;
}
__setup("vmpoff=", vmcmd_on_poff_setup);
/* Panic notifier: run the configured on_panic action (never returns
 * control to the panic path in practice — do_panic ends in stop/wait). */
static int on_panic_notify(struct notifier_block *self,
unsigned long event, void *data)
{
do_panic();
return NOTIFY_OK;
}
/* Lowest priority: let all other panic notifiers run first. */
static struct notifier_block on_panic_nb = {
.notifier_call = on_panic_notify,
.priority = INT_MIN,
};
/*
 * Early boot: record how this system was IPLed in ipl_info and register
 * the panic notifier.
 */
void __init setup_ipl(void)
{
ipl_info.type = get_ipl_type();
switch (ipl_info.type) {
case IPL_TYPE_CCW:
ipl_info.data.ccw.dev_id.ssid = ipl_ssid;
ipl_info.data.ccw.dev_id.devno = ipl_devno;
break;
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
ipl_info.data.fcp.dev_id.ssid = 0;
ipl_info.data.fcp.dev_id.devno =
IPL_PARMBLOCK_START->ipl_info.fcp.devno;
ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
break;
case IPL_TYPE_NSS:
/* NOTE(review): strncpy leaves the name unterminated if
 * kernel_nss_name fills the field — confirm consumers cope */
strncpy(ipl_info.data.nss.name, kernel_nss_name,
sizeof(ipl_info.data.nss.name));
break;
case IPL_TYPE_UNKNOWN:
/* We have no info to copy */
break;
}
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
}
/*
 * Try to read the current IPL parameter block via diag 308 store and
 * remember whether the diag 308 interface is functional.
 */
void __init ipl_update_parameters(void)
{
	switch (diag308(DIAG308_STORE, &ipl_block)) {
	case DIAG308_RC_OK:
	case DIAG308_RC_NOCONFIG:
		/* either result proves diag 308 works on this machine */
		diag308_set_works = 1;
		break;
	default:
		break;
	}
}
/*
 * Early boot: save CCW IPL information from the channel subsystem and,
 * for QDIO (FCP) boots, move the IPL parameter block to its well-known
 * location so later code can find it.
 */
void __init ipl_save_parameters(void)
{
struct cio_iplinfo iplinfo;
void *src, *dst;
if (cio_get_iplinfo(&iplinfo))
return;
ipl_ssid = iplinfo.ssid;
ipl_devno = iplinfo.devno;
ipl_flags |= IPL_DEVNO_VALID;
if (!iplinfo.is_qdio)
return;
ipl_flags |= IPL_PARMBLOCK_VALID;
/* relocate the firmware-provided parameter block (may overlap) */
src = (void *)(unsigned long)S390_lowcore.ipl_parmblock_ptr;
dst = (void *)IPL_PARMBLOCK_ORIGIN;
memmove(dst, src, PAGE_SIZE);
S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
}
/* List of reset callbacks used when diag 308 reset is unavailable. */
static LIST_HEAD(rcall);
static DEFINE_MUTEX(rcall_mutex);
/* Add a reset callback to the list (called before a manual reset). */
void register_reset_call(struct reset_call *reset)
{
mutex_lock(&rcall_mutex);
list_add(&reset->list, &rcall);
mutex_unlock(&rcall_mutex);
}
EXPORT_SYMBOL_GPL(register_reset_call);
/* Remove a previously registered reset callback. */
void unregister_reset_call(struct reset_call *reset)
{
mutex_lock(&rcall_mutex);
list_del(&reset->list);
mutex_unlock(&rcall_mutex);
}
EXPORT_SYMBOL_GPL(unregister_reset_call);
/*
 * Reset the machine: prefer the architected diag 308 reset; otherwise
 * fall back to invoking every registered reset callback.
 */
static void do_reset_calls(void)
{
struct reset_call *reset;
if (diag308_set_works) {
diag308_reset();
return;
}
list_for_each_entry(reset, &rcall, list)
reset->fn();
}
/* Prefix page address saved for the dump tools. */
u32 dump_prefix_page;
/*
 * Bring the machine into a defined state for kexec/kdump/reset:
 * disable prefixing and lowcore protection, install minimal machine
 * check and program check handlers, clear the subchannel info, store
 * CPU status, run fn_pre, perform the reset calls and finally run
 * fn_post(data).
 */
void s390_reset_system(void (*fn_pre)(void),
void (*fn_post)(void *), void *data)
{
struct _lowcore *lc;
lc = (struct _lowcore *)(unsigned long) store_prefix();
/* Stack for interrupt/machine check handler */
lc->panic_stack = S390_lowcore.panic_stack;
/* Save prefix page address for dump case */
dump_prefix_page = (u32)(unsigned long) lc;
/* Disable prefixing */
set_prefix(0);
/* Disable lowcore protection */
__ctl_clear_bit(0,28);
/* Set new machine check handler */
S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
S390_lowcore.mcck_new_psw.addr =
PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler;
/* Set new program check handler */
S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
S390_lowcore.program_new_psw.addr =
PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
/*
 * Clear subchannel ID and number to signal new kernel that no CCW or
 * SCSI IPL has been done (for kexec and kdump)
 */
S390_lowcore.subchannel_id = 0;
S390_lowcore.subchannel_nr = 0;
/* Store status at absolute zero */
store_status();
/* Call function before reset */
if (fn_pre)
fn_pre();
do_reset_calls();
/* Call function after reset */
if (fn_post)
fn_post(data);
}
| gpl-2.0 |
hunterhu/linux-sunxi | modules/wifi/ar6003/AR6kSDK.build_3.1_RC.514/host/tools/sigma-dut/wfa_cmdtbl.c | 90 | 10876 |
/****************************************************************************
* (c) Copyright 2007 Wi-Fi Alliance. All Rights Reserved
*
*
* LICENSE
*
* License is granted only to Wi-Fi Alliance members and designated
* contractors ("Authorized Licensees"). Authorized Licensees are granted
* the non-exclusive, worldwide, limited right to use, copy, import, export
* and distribute this software:
* (i) solely for noncommercial applications and solely for testing Wi-Fi
* equipment; and
* (ii) solely for the purpose of embedding the software into Authorized
* Licensee's proprietary equipment and software products for distribution to
* its customers under a license with at least the same restrictions as
* contained in this License, including, without limitation, the disclaimer of
* warranty and limitation of liability, below. The distribution rights
* granted in clause
* (ii), above, include distribution to third party companies who will
* redistribute the Authorized Licensee's product to their customers with or
* without such third party's private label. Other than expressly granted
* herein, this License is not transferable or sublicensable, and it does not
* extend to and may not be used with non-Wi-Fi applications. Wi-Fi Alliance
* reserves all rights not expressly granted herein.
*
* Except as specifically set forth above, commercial derivative works of
* this software or applications that use the Wi-Fi scripts generated by this
* software are NOT AUTHORIZED without specific prior written permission from
* Wi-Fi Alliance.
*
* Non-Commercial derivative works of this software for internal use are
* authorized and are limited by the same restrictions; provided, however,
* that the Authorized Licensee shall provide Wi-Fi Alliance with a copy of
* such derivative works under a perpetual, payment-free license to use,
* modify, and distribute such derivative works for purposes of testing Wi-Fi
* equipment.
*
* Neither the name of the author nor "Wi-Fi Alliance" may be used to endorse
* or promote products that are derived from or that use this software without
* specific prior written permission from Wi-Fi Alliance.
*
* THIS SOFTWARE IS PROVIDED BY WI-FI ALLIANCE "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE,
* ARE DISCLAIMED. IN NO EVENT SHALL WI-FI ALLIANCE BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, THE COST OF PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE) ARISING IN ANY WAY OUT OF
* THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************
*/
/*
* File: wfa_cmdtbl.c
* The file contains a predefined function array. The command process and
* execution functions of a DUT traffic generator and control will be
* registered in the array/table by the order of the defined commands TLV
* values.
*
* Revision History:
* 2006/03/10 -- initially created by qhu
* 2006/06/01 -- BETA release by qhu
* 2006/06/13 -- 00.02 release by qhu
* 2006/06/30 -- 00.10 Release by qhu
* 2006/07/10 -- 01.00 Release by qhu
* 2006/09/01 -- 01.05 Release by qhu
* 2007/02/15 -- WMM Extension Beta released by qhu, mkaroshi
* 2007/03/30 -- 01.40 WPA2 and Official WMM Beta release by qhu
* 2007/04/20 -- 02.00 WPA2 and Official WMM release by qhu
* 2007/08/15 -- 02.10 WMM-Power Save release by qhu
* 2007/10/10 -- 02.20 Voice SOHO beta -- qhu
* 2007/11/07 -- 02.30 Voice HSO -- qhu
* 2007/12/10 -- 02.32 Add a function to upload test results.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include "wfa_debug.h"
#include "wfa_types.h"
#include "wfa_main.h"
#include "wfa_tlv.h"
#include "wfa_tg.h"
#include "wfa_miscs.h"
#include "wfa_ca.h"
#include "wfa_sock.h"
#include "wfa_agt.h"
#include "wfa_rsp.h"
/* extern defined variables */
extern int gxcSockfd, btSockfd;
int NotDefinedYet(int len, unsigned char *params, int *respLen, BYTE *respBuf);
extern int agtCmdProcGetVersion(int len, BYTE *parms, int *respLen, BYTE *respBuf);
extern unsigned short wfa_defined_debug;
/* globally define the function table */
/*
 * Dispatch table mapping WFA command TLV values to their processing
 * functions.  The table is indexed directly by the command TLV value
 * (see the numbered comments), so entries MUST stay in TLV order.
 * Commands without an implementation point at the NotDefinedYet() stub.
 * The size expression accounts for the gap between the legacy command
 * range and the WFA_STA_NEW_COMMANDS range.
 */
xcCommandFuncPtr gWfaCmdFuncTbl[WFA_STA_COMMANDS_END+(WFA_STA_NEW_COMMANDS_END - WFA_STA_NEW_COMMANDS_START - 1)+1] =
{
    /* Traffic Agent Commands */
    NotDefinedYet, /* None (0) */
    agtCmdProcGetVersion, /* WFA_GET_VERSION_TLV (1) */
    wfaTGSendPing, /* WFA_TRAFFIC_SEND_PING_TLV (2) */
    wfaTGStopPing, /* WFA_TRAFFIC_STOP_PING_TLV (3) */
    wfaTGConfig, /* WFA_TRAFFIC_AGENT_CONFIG_TLV (4) */
    wfaTGSendStart, /* WFA_TRAFFIC_AGENT_SEND_TLV (5) */
    wfaTGRecvStart, /* WFA_TRAFFIC_AGENT_RECV_START_TLV (6) */
    wfaTGRecvStop, /* WFA_TRAFFIC_AGENT_RECV_STOP_TLV (7) */
    wfaTGReset, /* WFA_TRAFFIC_AGENT_RESET_TLV (8) */
    NotDefinedYet, /* WFA_TRAFFIC_AGENT_STATUS_TLV (9) */
    /* Control and Configuration Commands */
    wfaStaGetIpConfig, /* WFA_STA_GET_IP_CONFIG_TLV (10)*/
    wfaStaSetIpConfig, /* WFA_STA_SET_IP_CONFIG_TLV (11)*/
    wfaStaGetMacAddress, /* WFA_STA_GET_MAC_ADDRESS_TLV (12)*/
    NotDefinedYet, /* WFA_STA_SET_MAC_ADDRESS_TLV (13)*/
    wfaStaIsConnected, /* WFA_STA_IS_CONNECTED_TLV (14)*/
    wfaStaVerifyIpConnection, /* WFA_STA_VERIFY_IP_CONNECTION_TLV (15)*/
    wfaStaGetBSSID, /* WFA_STA_GET_BSSID_TLV (16)*/
    wfaStaGetStats, /* WFA_STA_GET_STATS_TLV (17)*/
    wfaSetEncryption, /* WFA_STA_SET_ENCRYPTION_TLV (18)*/
    wfaStaSetPSK, /* WFA_STA_SET_PSK_TLV (19)*/
    wfaStaSetEapTLS, /* WFA_STA_SET_EAPTLS_TLV (20)*/
    wfaStaSetUAPSD, /* WFA_STA_SET_UAPSD_TLV (21)*/
    wfaStaAssociate, /* WFA_STA_ASSOCIATE_TLV (22)*/
    wfaStaSetEapTTLS, /* WFA_STA_SET_EAPTTLS_TLV (23)*/
    wfaStaSetEapSIM, /* WFA_STA_SET_EAPSIM_TLV (24)*/
    wfaStaSetPEAP, /* WFA_STA_SET_PEAP_TLV (25)*/
    wfaStaSetIBSS, /* WFA_STA_SET_IBSS_TLV (26)*/
    wfaStaGetInfo, /* WFA_STA_GET_INFO_TLV (27)*/
    wfaDeviceGetInfo, /* WFA_DEVICE_GET_INFO_TLV (28)*/
    wfaDeviceListIF, /* WFA_DEVICE_LIST_IF_TLV (29)*/
    wfaStaDebugSet, /* WFA_STA_DEBUG_SET (30)*/
    wfaStaSetMode, /* WFA_STA_SET_MODE (31)*/
    wfaStaUpload, /* WFA_STA_UPLOAD (32)*/
    wfaStaSetWMM, /* WFA_STA_SETWMM (33)*/
    wfaStaReAssociate, /* WFA_STA_REASSOCIATE (34)*/
    wfaStaSetPwrSave, /* WFA_STA_SET_PWRSAVE (35)*/
    wfaStaSendNeigReq, /* WFA_STA_SEND_NEIGREQ (36)*/
    wfaStaPresetParams, /* WFA_STA_PRESET_PARAMETERS (37)*/
    wfaStaSetEapFAST, /* WFA_STA_SET_EAPFAST_TLV (38)*/
    wfaStaSetEapAKA, /* WFA_STA_SET_EAPAKA_TLV (39)*/
    wfaStaSetSystime, /* WFA_STA_SET_SYSTIME_TLV (40)*/
    wfaStaSet11n, /* WFA_STA_SET_11n_TLV (41)*/
    wfaStaSetWireless, /* WFA_STA_SET_WIRELESS_TLV (42)*/
    wfaStaSendADDBA, /* WFA_STA_SEND_ADDBA_TLV (43)*/
    wfaStaSendCoExistMGMT, /* WFA_STA_SET_COEXIST_MGMT_TLV (44)*/
    wfaStaSetRIFS, /* WFA_STA_SET_RIFS_TEST_TLV (45)*/
    wfaStaResetDefault, /* WFA_STA_RESET_DEFAULT_TLV (46)*/
    wfaStaDisconnect, /* WFA_STA_DISCONNECT_TLV (47)*/
    wfaStaGetP2pDevAddress, /* WFA_STA_GET_P2P_DEV_ADDRESS_TLV (48)*/
    wfaStaSetP2p, /* WFA_STA_SET_P2P_TLV (49)*/
    wfaStaP2pConnect, /* WFA_STA_P2P_CONNECT_TLV (50)*/
    wfaStaStartAutoGo, /* WFA_STA_START_AUTO_GO (51)*/
    wfaStaP2pStartGrpFormation, /* WFA_STA_P2P_START_GRP_FORMATION_TLV (52)*/
    wfaStaP2pDissolve, /* WFA_STA_P2P_DISSOLVE_TLV (53)*/
    wfaStaSendP2pInvReq, /* WFA_STA_SEND_P2P_INV_REQ_TLV (54)*/
    wfaStaAcceptP2pInvReq, /* WFA_STA_ACCEPT_P2P_INV_REQ_TLV (55)*/
    wfaStaSendP2pProvDisReq, /* WFA_STA_SEND_P2P_PROV_DIS_REQ_TLV (56)*/
    wfaStaSetWpsPbc, /* WFA_STA_SET_WPS_PBC_TLV (57)*/
    wfaStaWpsReadPin, /* WFA_STA_WPS_READ_PIN_TLV (58)*/
    wfaStaWpsEnterPin, /* WFA_STA_WPS_ENTER_PIN_TLV (59)*/
    wfaStaGetPsk, /* WFA_STA_GET_PSK_TLV (60)*/
    wfaStaP2pReset, /* WFA_STA_P2P_RESET_TLV (61)*/
    wfaStaWpsReadLabel, /* WFA_STA_WPS_READ_LABEL_TLV (62)*/
    wfaStaGetP2pIpConfig, /* WFA_STA_GET_P2P_IP_CONFIG_TLV (63)*/
    wfaStaSendServiceDiscoveryReq, /* WFA_STA_SEND_SERVICE_DISCOVERY_REQ_TLV (64)*/
    wfaStaSendP2pPresenceReq, /* WFA_STA_SEND_P2P_PRESENCE_REQ_TLV (65)*/
    wfaStaSetSleepReq, /* (66) NOTE(review): original comment duplicated (65)'s
                          TLV name; presumably WFA_STA_SET_SLEEP_REQ_TLV --
                          confirm against wfa_tlv.h */
    wfaStaSetOpportunisticPsReq, /* WFA_STA_SET_OPPORTUNISTIC_PS_TLV (67)*/
    wfaStaAddArpTableEntry, /* WFA_STA_ADD_ARP_TABLE_ENTRY_TLV (68)*/
    wfaStaBlockICMPResponse /* WFA_STA_BLOCK_ICMP_RESPONSE_TLV (69)*/
};
/*
 * NotDefinedYet(): placeholder handler registered for WFA command TLVs
 * that have no real processing function yet.  Logs a warning and reports
 * success so the agent still produces a response for the caller.
 * All parameters are accepted (to satisfy the xcCommandFuncPtr signature)
 * but none are used.
 */
int NotDefinedYet(int len, unsigned char *params, int *respLen, BYTE *respBuf)
{
    DPRINT_WARNING(WFA_WNG, "The command processing function not defined.\n");

    /* need to send back a response */
    return TRUE;
}
| gpl-2.0 |
bigbluesky123/decaf-platform | roms/ipxe/src/image/segment.c | 90 | 2665 | /*
* Copyright (C) 2007 Michael Brown <mbrown@fensystems.co.uk>.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
FILE_LICENCE ( GPL2_OR_LATER );
/**
* @file
*
* Executable image segments
*
*/
#include <errno.h>
#include <ipxe/uaccess.h>
#include <ipxe/io.h>
#include <ipxe/errortab.h>
#include <ipxe/segment.h>
/**
* Segment-specific error messages
*
* This error happens sufficiently often to merit a user-friendly
* description.
*/
#define ERANGE_SEGMENT __einfo_error ( EINFO_ERANGE_SEGMENT )
#define EINFO_ERANGE_SEGMENT \
__einfo_uniqify ( EINFO_ERANGE, 0x01, "Requested memory not available" )
struct errortab segment_errors[] __errortab = {
__einfo_errortab ( EINFO_ERANGE_SEGMENT ),
};
/**
 * Prepare segment for loading
 *
 * @v segment		Segment start
 * @v filesz		Size of the "allocated bytes" portion of the segment
 * @v memsz		Size of the segment
 * @ret rc		Return status code
 *
 * Verifies that the whole segment [start,end) lies inside a single
 * region of the current memory map, then zeroes the bss portion
 * [filesz,memsz).
 */
int prep_segment ( userptr_t segment, size_t filesz, size_t memsz ) {
	struct memory_map memmap;
	physaddr_t start = user_to_phys ( segment, 0 );
	physaddr_t mid = user_to_phys ( segment, filesz );
	physaddr_t end = user_to_phys ( segment, memsz );
	unsigned int idx;

	DBG ( "Preparing segment [%lx,%lx,%lx)\n", start, mid, end );

	/* Sanity check: the file-backed part cannot exceed the segment */
	if ( filesz > memsz ) {
		DBG ( "Insane segment [%lx,%lx,%lx)\n", start, mid, end );
		return -EINVAL;
	}

	/* Get a fresh memory map.  This allows us to automatically
	 * avoid treading on any regions that Etherboot is currently
	 * editing out of the memory map.
	 */
	get_memmap ( &memmap );

	/* Look for a region that fully contains the segment */
	for ( idx = 0 ; idx < memmap.count ; idx++ ) {
		if ( ( start < memmap.regions[idx].start ) ||
		     ( end > memmap.regions[idx].end ) )
			continue;
		/* Found valid region: zero bss and return */
		memset_user ( segment, filesz, 0, ( memsz - filesz ) );
		return 0;
	}

	/* No suitable memory region found */
	DBG ( "Segment [%lx,%lx,%lx) does not fit into available memory\n",
	      start, mid, end );
	return -ERANGE_SEGMENT;
}
| gpl-2.0 |
omnirom/android_kernel_google_msm | drivers/staging/prima/CORE/HDD/src/wlan_hdd_ftm.c | 90 | 144092 | /*
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/**========================================================================
\file wlan_hdd_ftm.c
\brief This file contains the WLAN factory test mode implementation
Copyright 2008 (c) Qualcomm, Incorporated. All Rights Reserved.
Qualcomm Confidential and Proprietary.
========================================================================*/
/**=========================================================================
EDIT HISTORY FOR FILE
This section contains comments describing changes made to the module.
Notice that changes are listed in reverse chronological order.
$Header:$ $DateTime: $ $Author: $
when who what, where, why
-------- --- --------------------------------------------------------
04/20/11 Leo/Henri Convergence for Prima and Volans. Single image
for FTM and mission mode
04/5/09 Shailender Created module.
==========================================================================*/
#include <vos_mq.h>
#include "vos_sched.h"
#include <vos_api.h>
#include "sirTypes.h"
#include "halTypes.h"
#include "sirApi.h"
#include "sirMacProtDef.h"
#include "sme_Api.h"
#include "macInitApi.h"
#include "wlan_qct_sys.h"
#include "wlan_qct_tl.h"
#include "wlan_hdd_misc.h"
#include "i_vos_packet.h"
#include "vos_nvitem.h"
#include "wlan_hdd_main.h"
#include "vos_power.h"
#include "qwlan_version.h"
#include "wlan_nv.h"
#include "wlan_qct_wda.h"
#include "cfgApi.h"
#include "pttMsgApi.h"
#include "wlan_qct_pal_device.h"
#define RXMODE_DISABLE_ALL 0
#define RXMODE_ENABLE_ALL 1
#define RXMODE_ENABLE_11GN 2
#define RXMODE_ENABLE_11B 3
#define FTM_CHAIN_SEL_NO_RX_TX 0
#define FTM_CHAIN_SEL_R0_ON 1
#define FTM_CHAIN_SEL_T0_ON 2
#define FTM_CHAIN_SEL_R0_T0_ON 3
#define FTM_CHAIN_SEL_MAX 3
#ifndef QWLAN_PHYDBG_BASE
#define QWLAN_PHYDBG_BASE 0x03004000
#endif /* QWLAN_PHYDBG_BASE */
#ifndef QWLAN_PHYDBG_TXPKT_CNT_REG
#define QWLAN_PHYDBG_TXPKT_CNT_REG (QWLAN_PHYDBG_BASE + 0x6C)
#define QWLAN_PHYDBG_TXPKT_CNT_CNT_MASK 0xFFFF
#endif
#ifndef QWLAN_AGC_BASE
#define QWLAN_AGC_BASE 0x03013C00
#endif /* QWLAN_AGC_BASE */
#ifndef QWLAN_AGC_CHANNEL_FREQ_REG
#define QWLAN_AGC_CHANNEL_FREQ_REG (QWLAN_AGC_BASE + 0x34)
#define QWLAN_AGC_CHANNEL_FREQ_FREQ_MASK 0x1FFF
#endif /* QWLAN_AGC_CHANNEL_FREQ_REG */
#ifndef QWLAN_AGC_SUBBAND_CONFIG_REG
#define QWLAN_AGC_SUBBAND_CONFIG_REG (QWLAN_AGC_BASE + 0x30)
#define QWLAN_AGC_SUBBAND_CONFIG_STG2_SUBBAND_MASK 0x03
#endif /* QWLAN_AGC_SUBBAND_CONFIG_REG */
#ifndef QWLAN_RFAPB_BASE
#define QWLAN_RFAPB_BASE 0x0E02F800
#endif /* QWLAN_RFAPB_BASE */
#ifndef QWLAN_RFAPB_REV_ID_REG
#define QWLAN_RFAPB_REV_ID_REG (QWLAN_RFAPB_BASE + 0x00)
#endif /* QWLAN_RFAPB_REV_ID_REG */
#ifndef QWLAN_TXCTL_BASE
#define QWLAN_TXCTL_BASE 0x03012000
#endif /* QWLAN_TXCTL_BASE */
#ifndef QWLAN_TXCTL_FSHIFT_REG
#define QWLAN_TXCTL_FSHIFT_REG (QWLAN_TXCTL_BASE + 0x20)
#define QWLAN_TXCTL_FSHIFT_BW14_OFFSET 0x02
#define QWLAN_TXCTL_FSHIFT_BW14_MASK 0x1C
#define QWLAN_TXCTL_FSHIFT_BW12_OFFSET 0x00
#define QWLAN_TXCTL_FSHIFT_BW12_MASK 0x03
#endif /* QWLAN_TXCTL_FSHIFT_REG */
/* To set 4MAC addresses from given first MAC address,
* Last byte value within given MAC address must less than 0xFF - 3 */
#define QWLAN_MAX_MAC_LAST_BYTE_VALUE 0xFC
/* Header preceding an NV table transferred to the host in <2K chunks.
   NOTE(review): tableData looks like the first byte of a variable-length
   payload that follows the header in the same buffer -- confirm against
   the PTT message handling code. */
typedef struct {
    tANI_U32 tableSize; /* Whole NV Table Size */
    tANI_U32 chunkSize; /* Current Chunk Size < 2K */
    eNvTable nvTable;   /* Which NV table this chunk belongs to */
    tANI_U8 tableData;  /* Filled by host driver */
} pttGetNvTable;

/* Same layout as pttGetNvTable, used for the set (download) direction. */
typedef struct {
    tANI_U32 tableSize; /* Whole NV Table Size */
    tANI_U32 chunkSize; /* Current Chunk Size < 2K */
    eNvTable nvTable;   /* Which NV table this chunk belongs to */
    tANI_U8 tableData;
} pttSetNvTable;
extern const sHalNv nvDefaults;
static int wlan_ftm_register_wext(hdd_adapter_t *pAdapter);
/* For PRIMA: every available (frequency, channel) pair in this table is
   defined with the channel frequency at the RF center frequency, since it
   maps onto the agc.channel_freq register.
   For channel bonding, the channel number is +2 or -2 for CB with primary
   high, or with primary low respectively.
*/
static const freq_chan_t freq_chan_tbl[] = {
     {2412, 1}, {2417, 2},{2422, 3}, {2427, 4}, {2432, 5}, {2437, 6}, {2442, 7},
     {2447, 8}, {2452, 9},{2457, 10},{2462, 11},{2467 ,12},{2472, 13},{2484, 14}
};
/* Maps HAL rate indices to the human-readable rate names used by the
   txrate iwpriv interface (looked up by wlan_ftm_priv_get_status() and
   the rate-setting path). */
static rateStr2rateIndex_t rateName_rateIndex_tbl[] =
{
    { HAL_PHY_RATE_11B_LONG_1_MBPS, "11B_LONG_1_MBPS"},
    { HAL_PHY_RATE_11B_LONG_2_MBPS, "11B_LONG_2_MBPS"},
    { HAL_PHY_RATE_11B_LONG_5_5_MBPS, "11B_LONG_5_5_MBPS"},
    { HAL_PHY_RATE_11B_LONG_11_MBPS, "11B_LONG_11_MBPS"},
    { HAL_PHY_RATE_11B_SHORT_2_MBPS, "11B_SHORT_2_MBPS"},
    { HAL_PHY_RATE_11B_SHORT_5_5_MBPS, "11B_SHORT_5_5_MBPS"},
    { HAL_PHY_RATE_11B_SHORT_11_MBPS, "11B_SHORT_11_MBPS"},
    /* Spica_Virgo 11A 20MHz Rates */
    { HAL_PHY_RATE_11A_6_MBPS, "11A_6_MBPS"},
    { HAL_PHY_RATE_11A_9_MBPS, "11A_9_MBPS"},
    { HAL_PHY_RATE_11A_12_MBPS, "11A_12_MBPS"},
    { HAL_PHY_RATE_11A_18_MBPS, "11A_18_MBPS"},
    { HAL_PHY_RATE_11A_24_MBPS, "11A_24_MBPS"},
    { HAL_PHY_RATE_11A_36_MBPS, "11A_36_MBPS"},
    { HAL_PHY_RATE_11A_48_MBPS, "11A_48_MBPS"},
    { HAL_PHY_RATE_11A_54_MBPS, "11A_54_MBPS"},
    /* MCS Index #0-15 (20MHz) */
    { HAL_PHY_RATE_MCS_1NSS_6_5_MBPS, "MCS_6_5_MBPS"},
    { HAL_PHY_RATE_MCS_1NSS_13_MBPS, "MCS_13_MBPS"},
    { HAL_PHY_RATE_MCS_1NSS_19_5_MBPS, "MCS_19_5_MBPS"},
    { HAL_PHY_RATE_MCS_1NSS_26_MBPS, "MCS_26_MBPS"},
    { HAL_PHY_RATE_MCS_1NSS_39_MBPS, "MCS_39_MBPS"},
    { HAL_PHY_RATE_MCS_1NSS_52_MBPS, "MCS_52_MBPS"},
    { HAL_PHY_RATE_MCS_1NSS_58_5_MBPS, "MCS_58_5_MBPS"},
    { HAL_PHY_RATE_MCS_1NSS_65_MBPS, "MCS_65_MBPS"},
    { HAL_PHY_RATE_MCS_1NSS_MM_SG_72_2_MBPS, "MCS_72_2_MBPS"}
};
/* Maps HAL rate indices to the PHYDBG preamble type to use when
   generating frames at that rate.  Short-guard-interval MM rates are
   marked PHYDBG_PREAMBLE_NOT_SUPPORTED except the 72.2 Mbps entry. */
static rateIndex2Preamble_t rate_index_2_preamble_table[] =
{
    { HAL_PHY_RATE_11B_LONG_1_MBPS, PHYDBG_PREAMBLE_LONGB},
    { HAL_PHY_RATE_11B_LONG_2_MBPS, PHYDBG_PREAMBLE_LONGB},
    { HAL_PHY_RATE_11B_LONG_5_5_MBPS, PHYDBG_PREAMBLE_LONGB},
    { HAL_PHY_RATE_11B_LONG_11_MBPS, PHYDBG_PREAMBLE_LONGB},
    { HAL_PHY_RATE_11B_SHORT_2_MBPS, PHYDBG_PREAMBLE_SHORTB},
    { HAL_PHY_RATE_11B_SHORT_5_5_MBPS, PHYDBG_PREAMBLE_SHORTB},
    { HAL_PHY_RATE_11B_SHORT_11_MBPS, PHYDBG_PREAMBLE_SHORTB},
    /* Spica_Virgo 11A 20MHz Rates */
    { HAL_PHY_RATE_11A_6_MBPS, PHYDBG_PREAMBLE_OFDM},
    { HAL_PHY_RATE_11A_9_MBPS, PHYDBG_PREAMBLE_OFDM},
    { HAL_PHY_RATE_11A_12_MBPS, PHYDBG_PREAMBLE_OFDM},
    { HAL_PHY_RATE_11A_18_MBPS, PHYDBG_PREAMBLE_OFDM},
    { HAL_PHY_RATE_11A_24_MBPS, PHYDBG_PREAMBLE_OFDM},
    { HAL_PHY_RATE_11A_36_MBPS, PHYDBG_PREAMBLE_OFDM},
    { HAL_PHY_RATE_11A_48_MBPS, PHYDBG_PREAMBLE_OFDM},
    { HAL_PHY_RATE_11A_54_MBPS, PHYDBG_PREAMBLE_OFDM},
    /* MCS Index #0-15 (20MHz) */
    { HAL_PHY_RATE_MCS_1NSS_6_5_MBPS, PHYDBG_PREAMBLE_MIXED},
    { HAL_PHY_RATE_MCS_1NSS_13_MBPS, PHYDBG_PREAMBLE_MIXED},
    { HAL_PHY_RATE_MCS_1NSS_19_5_MBPS, PHYDBG_PREAMBLE_MIXED},
    { HAL_PHY_RATE_MCS_1NSS_26_MBPS, PHYDBG_PREAMBLE_MIXED},
    { HAL_PHY_RATE_MCS_1NSS_39_MBPS, PHYDBG_PREAMBLE_MIXED},
    { HAL_PHY_RATE_MCS_1NSS_52_MBPS, PHYDBG_PREAMBLE_MIXED},
    { HAL_PHY_RATE_MCS_1NSS_58_5_MBPS, PHYDBG_PREAMBLE_MIXED},
    { HAL_PHY_RATE_MCS_1NSS_65_MBPS, PHYDBG_PREAMBLE_MIXED},
    { HAL_PHY_RATE_MCS_1NSS_MM_SG_7_2_MBPS, PHYDBG_PREAMBLE_NOT_SUPPORTED},
    { HAL_PHY_RATE_MCS_1NSS_MM_SG_14_4_MBPS,PHYDBG_PREAMBLE_NOT_SUPPORTED},
    { HAL_PHY_RATE_MCS_1NSS_MM_SG_21_7_MBPS,PHYDBG_PREAMBLE_NOT_SUPPORTED},
    { HAL_PHY_RATE_MCS_1NSS_MM_SG_28_9_MBPS,PHYDBG_PREAMBLE_NOT_SUPPORTED},
    { HAL_PHY_RATE_MCS_1NSS_MM_SG_43_3_MBPS,PHYDBG_PREAMBLE_NOT_SUPPORTED},
    { HAL_PHY_RATE_MCS_1NSS_MM_SG_57_8_MBPS,PHYDBG_PREAMBLE_NOT_SUPPORTED},
    { HAL_PHY_RATE_MCS_1NSS_MM_SG_65_MBPS, PHYDBG_PREAMBLE_NOT_SUPPORTED},
    { HAL_PHY_RATE_MCS_1NSS_MM_SG_72_2_MBPS, PHYDBG_PREAMBLE_MIXED},
};
/* Cached state of the factory-test-mode driver, mirroring what has been
   configured via the FTM private ioctls. */
typedef struct
{
    tANI_BOOLEAN frameGenEnabled;   /* TX packet generator currently running */
    tANI_BOOLEAN wfmEnabled;        /* waveform generation enabled */
    sPttFrameGenParams frameParams; /* parameters used when generating frames */
    v_U16_t txpower;                /* last requested TX power setting */
    v_U16_t rxmode;                 /* one of the RXMODE_* values above */
    v_U16_t chainSelect;            /* one of the FTM_CHAIN_SEL_* values above */
} FTM_STATUS ;
/* Single module-wide FTM state, initialised by _ftm_status_init(). */
static FTM_STATUS ftm_status;
//tpAniSirGlobal pMac;
/*
 * _ftm_status_init() - reset the module-wide ftm_status to its defaults:
 * generators idle, 6 Mbps OFDM frames with fixed test MAC addresses,
 * all receive packet types enabled and both chains (R0/T0) selected.
 */
static void _ftm_status_init(void)
{
    tANI_U8 destAddr[ANI_MAC_ADDR_SIZE]  = { 0x00, 0x11, 0x11, 0x11, 0x11, 0x11 };
    tANI_U8 srcAddr[ANI_MAC_ADDR_SIZE]   = { 0x00, 0x22, 0x22, 0x22, 0x22, 0x22 };
    tANI_U8 bssidAddr[ANI_MAC_ADDR_SIZE] = { 0x00, 0x33, 0x33, 0x33, 0x33, 0x33 };

    /* Generators are idle until explicitly started. */
    ftm_status.wfmEnabled = eANI_BOOLEAN_FALSE;
    ftm_status.frameGenEnabled = eANI_BOOLEAN_FALSE;

    /* Default frame-generation parameters. */
    ftm_status.frameParams.numTestPackets = 0; /* 0 == continuous */
    ftm_status.frameParams.interFrameSpace = 10;
    ftm_status.frameParams.rate = HAL_PHY_RATE_11A_6_MBPS;
    ftm_status.frameParams.payloadContents = TEST_PAYLOAD_RANDOM;
    ftm_status.frameParams.payloadLength = 2000;
    ftm_status.frameParams.payloadFillByte = 0xA5;
    ftm_status.frameParams.pktAutoSeqNum = eANI_BOOLEAN_FALSE;
    ftm_status.frameParams.tx_mode = 0;
    ftm_status.frameParams.crc = 0;
    ftm_status.frameParams.preamble = PHYDBG_PREAMBLE_OFDM;
    memcpy(&ftm_status.frameParams.addr1[0], destAddr, ANI_MAC_ADDR_SIZE);
    memcpy(&ftm_status.frameParams.addr2[0], srcAddr, ANI_MAC_ADDR_SIZE);
    memcpy(&ftm_status.frameParams.addr3[0], bssidAddr, ANI_MAC_ADDR_SIZE);

    ftm_status.txpower = 2 ;
    ftm_status.rxmode = RXMODE_ENABLE_ALL; /* macStart() enables all receive pkt types */
    ftm_status.chainSelect = FTM_CHAIN_SEL_R0_T0_ON;

    return;
}
/**---------------------------------------------------------------------------

  \brief wlan_ftm_postmsg() -

   Post an FTM command buffer to halPhy through the WDA message queue.

  \param  - cmd_ptr - Pointer to the command buffer.  Ownership stays with
            the caller; WDA receives it via the message body pointer.

  \param  - cmd_len - Command length (not used by this function; kept for
            interface compatibility with callers).

  \return - 0 for success, non zero for failure

  --------------------------------------------------------------------------*/
static v_U32_t wlan_ftm_postmsg(v_U8_t *cmd_ptr, v_U16_t cmd_len)
{
    /* Fix: removed the unused local `ftmReqMsg`, which was assigned from
       cmd_ptr but never read (dead store). */
    vos_msg_t ftmMsg;
    ENTER();

    /* Build the VOS message that carries the FTM command to WDA. */
    ftmMsg.type = WDA_FTM_CMD_REQ;
    ftmMsg.reserved = 0;
    ftmMsg.bodyptr = (v_U8_t*)cmd_ptr;
    ftmMsg.bodyval = 0;

    /* Use Vos messaging mechanism to send the command to halPhy */
    if (VOS_STATUS_SUCCESS != vos_mq_post_message(
       VOS_MODULE_ID_WDA,
       (vos_msg_t *)&ftmMsg)) {
       hddLog(VOS_TRACE_LEVEL_ERROR,"%s: : Failed to post Msg to HAL\n",__func__);

       return VOS_STATUS_E_FAILURE;
    }
    EXIT();
    return VOS_STATUS_SUCCESS;
}
/*---------------------------------------------------------------------------

  \brief wlan_ftm_vos_open() - Open the vOSS Module

  The \a wlan_ftm_vos_open() function opens the vOSS Scheduler.
  Upon successful initialization:

     - All VOS submodules should have been initialized

     - The VOS scheduler should have opened

     - All the WLAN SW components should have been opened.  This includes
       SYS, WDA, NV, MAC and SME.

  Resources are released in reverse order of acquisition on any failure
  (goto-based cleanup chain at the bottom of the function).

  \param  pVosContext: The VOS context (pre-allocated by vos_preOpen()).
  \param  hddContextSize: Size of the HDD context to allocate
          (currently unused here).

  \return VOS_STATUS_SUCCESS - Scheduler was successfully initialized and
          is ready to be used.

          VOS_STATUS_E_RESOURCES - System resources (other than memory)
          are unavailable to initialize the scheduler

          VOS_STATUS_E_FAILURE - Failure to initialize the scheduler

  \sa wlan_ftm_vos_open()

---------------------------------------------------------------------------*/
static VOS_STATUS wlan_ftm_vos_open( v_CONTEXT_t pVosContext, v_SIZE_t hddContextSize )
{
   VOS_STATUS vStatus = VOS_STATUS_SUCCESS;
   int iter = 0;
   tSirRetStatus sirStatus = eSIR_SUCCESS;
   tMacOpenParameters macOpenParms;
   pVosContextType gpVosContext = (pVosContextType)pVosContext;

   VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_INFO_HIGH,
            "%s: Opening VOSS", __func__);

   if (NULL == gpVosContext)
   {
      VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                "%s: Trying to open VOSS without a PreOpen",__func__);
      VOS_ASSERT(0);
      return VOS_STATUS_E_FAILURE;
   }

   /* Initialize the probe event */
   if (vos_event_init(&gpVosContext->ProbeEvent) != VOS_STATUS_SUCCESS)
   {
      VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                "%s: Unable to init probeEvent",__func__);
      VOS_ASSERT(0);
      return VOS_STATUS_E_FAILURE;
   }

   if(vos_event_init(&(gpVosContext->wdaCompleteEvent)) != VOS_STATUS_SUCCESS )
   {
      VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                "%s: Unable to init wdaCompleteEvent",__func__);
      VOS_ASSERT(0);
      goto err_probe_event;
   }

   /* Initialize the free message queue */
   vStatus = vos_mq_init(&gpVosContext->freeVosMq);
   if (! VOS_IS_STATUS_SUCCESS(vStatus))
   {
      /* Critical Error ...  Cannot proceed further */
      VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                "%s: Failed to initialize VOS free message queue",__func__);
      VOS_ASSERT(0);
      goto err_wda_complete_event;
   }

   /* Hand every pre-allocated message wrapper to the free queue. */
   for (iter = 0; iter < VOS_CORE_MAX_MESSAGES; iter++)
   {
      (gpVosContext->aMsgWrappers[iter]).pVosMsg =
         &(gpVosContext->aMsgBuffers[iter]);
      INIT_LIST_HEAD(&gpVosContext->aMsgWrappers[iter].msgNode);
      vos_mq_put(&gpVosContext->freeVosMq, &(gpVosContext->aMsgWrappers[iter]));
   }

   /* Now Open the VOS Scheduler */
   vStatus= vos_sched_open(gpVosContext, &gpVosContext->vosSched,
                           sizeof(VosSchedContext));
   if (!VOS_IS_STATUS_SUCCESS(vStatus))
   {
      /* Critical Error ...  Cannot proceed further */
      VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                "%s: Failed to open VOS Scheduler", __func__);
      VOS_ASSERT(0);
      goto err_msg_queue;
   }

   /* Open the SYS module */
   vStatus = sysOpen(gpVosContext);
   if (!VOS_IS_STATUS_SUCCESS(vStatus))
   {
      /* Critical Error ...  Cannot proceed further */
      VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                "%s: Failed to open SYS module",__func__);
      VOS_ASSERT(0);
      goto err_sched_close;
   }

   /*Open the WDA module */
   vos_mem_set(&macOpenParms, sizeof(macOpenParms), 0);
   macOpenParms.driverType = eDRIVER_TYPE_MFG;
   vStatus = WDA_open(gpVosContext, gpVosContext->pHDDContext, &macOpenParms);
   if (!VOS_IS_STATUS_SUCCESS(vStatus))
   {
      /* Critical Error ...  Cannot proceed further */
      VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                "%s: Failed to open WDA module",__func__);
      VOS_ASSERT(0);
      goto err_sys_close;
   }

   /* initialize the NV module */
   vStatus = vos_nv_open();
   if (!VOS_IS_STATUS_SUCCESS(vStatus))
   {
      // NV module cannot be initialized, however the driver is allowed
      // to proceed
      VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                 "%s: Failed to initialize the NV module", __func__);
      goto err_wda_close;
   }

   /* If we arrive here, both threads dispacthing messages correctly */

   /* Now proceed to open the MAC */
   /* UMA is supported in hardware for performing the
      frame translation 802.11 <-> 802.3 */
   macOpenParms.frameTransRequired = 1;
   sirStatus = macOpen(&(gpVosContext->pMACContext), gpVosContext->pHDDContext,
                       &macOpenParms);

   if (eSIR_SUCCESS != sirStatus)
   {
      /* Critical Error ...  Cannot proceed further */
      VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                 "%s: Failed to open MAC", __func__);
      VOS_ASSERT(0);
      goto err_nv_close;
   }

   /* Now proceed to open the SME */
   vStatus = sme_Open(gpVosContext->pMACContext);
   if (!VOS_IS_STATUS_SUCCESS(vStatus))
   {
      /* Critical Error ...  Cannot proceed further */
      VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                "%s: Failed to open SME",__func__);
      goto err_mac_close;
   }

   /* Fix: the original had a premature `return VOS_STATUS_SUCCESS;` here,
      which made this success trace unreachable dead code.  Log first,
      then return. */
   VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_INFO_HIGH,
             "%s: VOSS successfully Opened",__func__);

   return VOS_STATUS_SUCCESS;

   /* Cleanup chain: unwind in reverse order of acquisition. */
err_mac_close:
   macClose(gpVosContext->pMACContext);

err_nv_close:
   vos_nv_close();

err_wda_close:
   WDA_close(gpVosContext);

err_sys_close:
   sysClose(gpVosContext);

err_sched_close:
   vos_sched_close(gpVosContext);
err_msg_queue:
   vos_mq_deinit(&gpVosContext->freeVosMq);

err_wda_complete_event:
   vos_event_destroy(&gpVosContext->wdaCompleteEvent);

err_probe_event:
   vos_event_destroy(&gpVosContext->ProbeEvent);

   return VOS_STATUS_E_FAILURE;

} /* wlan_ftm_vos_open() */
/*---------------------------------------------------------------------------

  \brief wlan_ftm_vos_close() - Close the vOSS Module

  The \a wlan_ftm_vos_close() function closes the vOSS Module, tearing
  down the components opened by wlan_ftm_vos_open() in reverse order
  (SME, MAC, NV, SYS, WDA, message queue, events).  Failures are logged
  and asserted, but teardown continues.

  \param vosContext  context of vos

  \return VOS_STATUS_SUCCESS - successfully closed

  \sa wlan_ftm_vos_close()

---------------------------------------------------------------------------*/
static VOS_STATUS wlan_ftm_vos_close( v_CONTEXT_t vosContext )
{
  VOS_STATUS vosStatus;
  pVosContextType gpVosContext = (pVosContextType)vosContext;

  vosStatus = sme_Close(((pVosContextType)vosContext)->pMACContext);
  if (!VOS_IS_STATUS_SUCCESS(vosStatus))
  {
     /* Fix: message used to say "Failed to close BAL", but the failing
        call here is sme_Close(). */
     VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
         "%s: Failed to close SME",__func__);
     VOS_ASSERT( VOS_IS_STATUS_SUCCESS( vosStatus ) );
  }

  vosStatus = macClose( ((pVosContextType)vosContext)->pMACContext);
  if (!VOS_IS_STATUS_SUCCESS(vosStatus))
  {
     VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
         "%s: Failed to close MAC",__func__);
     VOS_ASSERT( VOS_IS_STATUS_SUCCESS( vosStatus ) );
  }

  ((pVosContextType)vosContext)->pMACContext = NULL;

  vosStatus = vos_nv_close();
  if (!VOS_IS_STATUS_SUCCESS(vosStatus))
  {
     VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
         "%s: Failed to close NV",__func__);
     VOS_ASSERT( VOS_IS_STATUS_SUCCESS( vosStatus ) );
  }

  vosStatus = sysClose( vosContext );
  if (!VOS_IS_STATUS_SUCCESS(vosStatus))
  {
     VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
         "%s: Failed to close SYS",__func__);
     VOS_ASSERT( VOS_IS_STATUS_SUCCESS( vosStatus ) );
  }

  vosStatus = WDA_close( vosContext );
  if (!VOS_IS_STATUS_SUCCESS(vosStatus))
  {
     VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
         "%s: Failed to close WDA",__func__);
     VOS_ASSERT( VOS_IS_STATUS_SUCCESS( vosStatus ) );
  }

  vos_mq_deinit(&((pVosContextType)vosContext)->freeVosMq);

  vosStatus = vos_event_destroy(&gpVosContext->ProbeEvent);
  if (!VOS_IS_STATUS_SUCCESS(vosStatus))
  {
     VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
         "%s: Failed to destroy ProbeEvent",__func__);
     VOS_ASSERT( VOS_IS_STATUS_SUCCESS( vosStatus ) );
  }

  vosStatus = vos_event_destroy(&gpVosContext->wdaCompleteEvent);
  if (!VOS_IS_STATUS_SUCCESS(vosStatus))
  {
     VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
         "%s: Failed to destroy wdaCompleteEvent",__func__);
     VOS_ASSERT( VOS_IS_STATUS_SUCCESS( vosStatus ) );
  }

  return VOS_STATUS_SUCCESS;
}
/**---------------------------------------------------------------------------

  \brief wlan_ftm_priv_set_txifs() -

   Set the inter-frame spacing used by the FTM TX packet generator.
   Rejected unless FTM has started and the generator is stopped.

  \param  - pAdapter - Pointer HDD Context.

  \param  - ifs - inter-frame space in microseconds (0..100000).

  \return - 0 for success, non zero for failure

  --------------------------------------------------------------------------*/
static VOS_STATUS wlan_ftm_priv_set_txifs(hdd_adapter_t *pAdapter,v_U32_t ifs)
{
    hdd_context_t *hddCtxt = (hdd_context_t *)pAdapter->pHddCtx;

    /* FTM must be running before generator parameters may be changed. */
    if (hddCtxt->ftm.ftm_state != WLAN_FTM_STARTED) {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm. ",__func__);
        return VOS_STATUS_E_FAILURE;
    }

    /* do not allow to change setting when tx pktgen is enabled */
    if (ftm_status.frameGenEnabled) {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:cannot set txifs when pktgen is enabled.",__func__);
        return VOS_STATUS_E_FAILURE;
    }

    /* Upper bound comes from MSK_24 / ONE_MICROSECOND. */
    if (ifs > 100000) {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:ifs value is invalid ",__func__);
        return VOS_STATUS_E_FAILURE;
    }

    ftm_status.frameParams.interFrameSpace = ifs;

    return VOS_STATUS_SUCCESS;
}
/**---------------------------------------------------------------------------

  \brief wlan_ftm_priv_set_txpktcnt() -

   Set the number of test packets the FTM TX packet generator will send
   (0 means continuous).  Rejected unless FTM has started and the
   generator is stopped.

  \param  - pAdapter - Pointer HDD Context.

  \param  - cnt - packet count (0..QWLAN_PHYDBG_TXPKT_CNT_CNT_MASK).

  \return - 0 for success, non zero for failure

  --------------------------------------------------------------------------*/
static VOS_STATUS wlan_ftm_priv_set_txpktcnt(hdd_adapter_t *pAdapter,v_U32_t cnt)
{
    hdd_context_t *hddCtxt = (hdd_context_t *)pAdapter->pHddCtx;

    /* FTM must be running before generator parameters may be changed. */
    if (hddCtxt->ftm.ftm_state != WLAN_FTM_STARTED) {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm. ",__func__);
        return VOS_STATUS_E_FAILURE;
    }

    /* do not allow to change setting when tx pktgen is enabled */
    if (ftm_status.frameGenEnabled) {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:cannot set txpktcnt when pktgen is enabled.",__func__);
        return VOS_STATUS_E_FAILURE;
    }

    /* Hardware counter field is 16 bits wide (0xFFFF). */
    if (cnt > QWLAN_PHYDBG_TXPKT_CNT_CNT_MASK) {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:pktcnt value is invalid",__func__);
        return VOS_STATUS_E_FAILURE;
    }

    ftm_status.frameParams.numTestPackets = cnt;

    return VOS_STATUS_SUCCESS;
}
/**---------------------------------------------------------------------------

  \brief wlan_ftm_priv_set_txpktlen() -

   Set the payload length used by the FTM TX packet generator.
   Rejected unless FTM has started and the generator is stopped.

  \param  - pAdapter - Pointer HDD Context.

  \param  - len - payload length in bytes (0..4095).

  \return - 0 for success, non zero for failure

  --------------------------------------------------------------------------*/
static VOS_STATUS wlan_ftm_priv_set_txpktlen(hdd_adapter_t *pAdapter,v_U32_t len)
{
    hdd_context_t *pHddCtx = (hdd_context_t *)pAdapter->pHddCtx;

    if(pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm. ",__func__);
        return VOS_STATUS_E_FAILURE;
    }

    /* do not allow to change setting when tx pktgen is enabled */
    if (ftm_status.frameGenEnabled)
    {
        /* Fix: message used to say "txpktcnt" (copy-paste from the
           packet-count setter); this function sets txpktlen. */
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:cannot set txpktlen when pktgen is enabled.",__func__);
        return VOS_STATUS_E_FAILURE;
    }

    if (len > 4095) /* maximum payload length is 4095 bytes */
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:payload len is invalid",__func__);
        return VOS_STATUS_E_FAILURE;
    }

    ftm_status.frameParams.payloadLength = (tANI_U16)len;

    return VOS_STATUS_SUCCESS;
}
/**---------------------------------------------------------------------------
--------------------------------------------------------------------------*/
/*
 * wlan_ftm_priv_enable_chain() - select which RX/TX chains are enabled.
 *
 * Translates the FTM_CHAIN_SEL_* value into the corresponding
 * PHY_CHAIN_SEL_* value, posts a PTT_MSG_ENABLE_CHAINS message to the
 * firmware via wlan_ftm_postmsg(), and blocks (with timeout) on the FTM
 * completion variable for the response.
 *
 * pAdapter:    HDD adapter whose HDD context holds FTM state.
 * chainSelect: one of FTM_CHAIN_SEL_NO_RX_TX / _R0_ON / _T0_ON / _R0_T0_ON.
 *
 * Returns VOS_STATUS_SUCCESS on success; VOS_STATUS_E_FAILURE if FTM is
 * not started, the argument is out of range, the generator is running,
 * posting fails, or the PTT response reports failure; VOS_STATUS_E_NOMEM
 * if the message buffer cannot be allocated.
 */
static VOS_STATUS wlan_ftm_priv_enable_chain(hdd_adapter_t *pAdapter,v_U16_t chainSelect)
{
    tPttMsgbuffer *pMsgBuf;
    uPttMsgs *pMsgBody;
    VOS_STATUS status;
    /* Remember the FTM-domain value; ftm_status records it on success. */
    v_U16_t chainSelect_save = chainSelect;
    hdd_context_t *pHddCtx = (hdd_context_t *)pAdapter->pHddCtx;
    if(pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm. ",__func__);
        return VOS_STATUS_E_FAILURE;
    }
    if (chainSelect > FTM_CHAIN_SEL_MAX)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Invalid chain",__func__);
        return VOS_STATUS_E_FAILURE;
    }
    /* do not allow to change setting when tx pktgen is enabled */
    if (ftm_status.frameGenEnabled)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:cannot select chain when pktgen is enabled.",__func__);
        return VOS_STATUS_E_FAILURE;
    }
    /* Translate the FTM-domain selector into the PHY-domain value.
       NOTE(review): FTM_CHAIN_SEL_R0_T0_ON (3) has no case here and is
       passed through unchanged -- presumably it matches
       PHY_CHAIN_SEL_R0_T0_ON numerically; confirm against the PHY header. */
    switch (chainSelect)
    {
        case FTM_CHAIN_SEL_NO_RX_TX:
            chainSelect = PHY_CHAIN_SEL_NO_RX_TX;
            break;

        case FTM_CHAIN_SEL_R0_ON:
            chainSelect = PHY_CHAIN_SEL_R0_ON;
            break;

        case FTM_CHAIN_SEL_T0_ON:
            chainSelect = PHY_CHAIN_SEL_T0_ON;
            break;
    }
    pMsgBuf = (tPttMsgbuffer *)vos_mem_malloc(sizeof(tPttMsgbuffer));
    if(pMsgBuf == NULL)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:pMsgBuf is NULL",__func__);
        return VOS_STATUS_E_NOMEM;
    }
    /* Re-arm the completion before posting so the response cannot race. */
    init_completion(&pHddCtx->ftm.ftm_comp_var);
    pMsgBuf->msgId = PTT_MSG_ENABLE_CHAINS;
    pMsgBuf->msgBodyLength = sizeof(tMsgPttEnableChains) + PTT_HEADER_LENGTH;
    pMsgBody = &pMsgBuf->msgBody;
    pMsgBody->EnableChains.chainSelect = chainSelect;
    status = wlan_ftm_postmsg((v_U8_t*)pMsgBuf,pMsgBuf->msgBodyLength);
    if(status != VOS_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:wlan_ftm_postmsg failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }
    /* Wait (bounded) for the firmware response to fill pMsgBuf. */
    wait_for_completion_interruptible_timeout(&pHddCtx->ftm.ftm_comp_var, msecs_to_jiffies(WLAN_FTM_COMMAND_TIME_OUT));
    if(pMsgBuf->msgResponse != PTT_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ptt response status failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }
    ftm_status.chainSelect = chainSelect_save;
done:
    vos_mem_free((v_VOID_t * )pMsgBuf);
    return status;
}
/*
 * wlan_ftm_priv_get_status() - format the current FTM generator state
 * (chain selection, rx mode, pktgen state, ifs, rate name, power,
 * packet count, payload length) into the caller-supplied buffer.
 *
 * pAdapter: HDD adapter whose HDD context holds FTM state.
 * buf:      destination buffer; caller must provide at least
 *           WE_FTM_MAX_STR_LEN bytes.
 *
 * Returns VOS_STATUS_SUCCESS, or VOS_STATUS_E_FAILURE if FTM is not
 * started or any snprintf/strlcpy step fails or would truncate.
 *
 * NOTE(review): interFrameSpace and numTestPackets are printed with %ld
 * but appear to be v_U32_t/tANI_U32 -- a specifier/size mismatch on
 * LP64 builds; confirm the typedefs before changing.
 */
static VOS_STATUS wlan_ftm_priv_get_status(hdd_adapter_t *pAdapter,char *buf)
{
    int ii;
    int lenBuf = WE_FTM_MAX_STR_LEN;
    int lenRes = 0;
    /* Indexed by ftm_status.chainSelect (0..FTM_CHAIN_SEL_MAX). */
    char *chain[] = {
        "None",
        "R0,R1",
        "R0",
        "R1",
        "T0",
        "R0,R1,T0"
    };
    /* Indexed by ftm_status.rxmode (RXMODE_* values). */
    char *rx[] = {
        "disable",
        "11b/g/n",
        "11g/n",
        "11b"
    };
    /* Indexed by ftm_status.frameGenEnabled (boolean). */
    char *tx[] = {
        "stopped",
        "started",
    };
    hdd_context_t *pHddCtx = (hdd_context_t *)pAdapter->pHddCtx;
    if(pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm. ",__func__);
        return VOS_STATUS_E_FAILURE;
    }
    /* First section: chain/rx/tx state and ifs; advance buf past it. */
    lenRes = snprintf(buf, lenBuf, "\n chainSelect: %s\n rxmode: %s\n "
                      "txpktgen: %s\n txifs: %ld\n txrate: ",
                      chain[ftm_status.chainSelect], rx[ftm_status.rxmode],
                      tx[ftm_status.frameGenEnabled],
                      ftm_status.frameParams.interFrameSpace);

    if ((lenRes < 0) || (lenRes >= lenBuf))
    {
        return VOS_STATUS_E_FAILURE;
    }

    buf += lenRes;
    lenBuf -= lenRes;

    /* Translate the numeric rate index back to its display name. */
    for (ii = 0; ii < SIZE_OF_TABLE(rateName_rateIndex_tbl); ii++)
    {
        if (rateName_rateIndex_tbl[ii].rate_index == ftm_status.frameParams.rate)
            break;
    }

    if (ii < SIZE_OF_TABLE(rateName_rateIndex_tbl))
    {
        lenRes = strlcpy(buf, rateName_rateIndex_tbl[ii].rate_str, lenBuf);
    }
    else
    {
        lenRes = strlcpy(buf, "invalid", lenBuf);
    }

    if ((lenRes < 0) || (lenRes >= lenBuf))
    {
        return VOS_STATUS_E_FAILURE;
    }

    buf += lenRes;
    lenBuf -= lenRes;

    /* Final section: power, packet count and payload length. */
    lenRes = snprintf(buf, lenBuf, "\n txpower: %d\n txpktcnt: %ld\n "
                      "txpktlen: %d\n", ftm_status.txpower,
                      ftm_status.frameParams.numTestPackets,
                      ftm_status.frameParams.payloadLength);

    if ((lenRes < 0) || (lenRes >= lenBuf))
    {
        return VOS_STATUS_E_FAILURE;
    }

    return VOS_STATUS_SUCCESS;
}
/**
 * HEXDUMP() - debug helper that dumps a buffer to the kernel log in hex.
 * @s0:  label string printed ahead of the dump
 * @s1:  buffer to dump
 * @len: number of bytes to dump
 *
 * Output is emitted at KERN_EMERG priority (kept from the original so
 * dumps are never filtered by the console log level).
 */
void HEXDUMP(char *s0, char *s1, int len)
{
    int tmp;

    printk(KERN_EMERG "%s\n :", s0);
    for (tmp = 0; tmp < len; tmp++) {
        /* Cast through unsigned char: plain char may be signed, and a
         * byte >= 0x80 would be sign-extended by integer promotion so
         * that %02x prints e.g. "ffffffab" instead of "ab". */
        printk(KERN_EMERG "%02x ", (unsigned char)s1[tmp]);
    }
    printk("\n");
}
/*---------------------------------------------------------------------------
\brief vos_ftm_preStart() -
The \a vos_ftm_preStart() function to download CFG.
including:
- ccmStart
- WDA: triggers the CFG download
\param pVosContext: The VOS context
\return VOS_STATUS_SUCCESS - Scheduler was successfully initialized and
is ready to be used.
VOS_STATUS_E_RESOURCES - System resources (other than memory)
are unavailable to initialize the scheduler
VOS_STATUS_E_FAILURE - Failure to initialize the scheduler/
\sa vos_start
---------------------------------------------------------------------------*/
/*
 * vos_ftm_preStart() - run the pre-start sequence for FTM mode:
 * macPreStart, ccmStart, then WDA_preStart (which triggers the CFG
 * download) and a bounded wait for WDA to signal completion.
 *
 * Returns VOS_STATUS_SUCCESS when every stage completes, otherwise
 * VOS_STATUS_E_FAILURE.
 */
VOS_STATUS vos_ftm_preStart( v_CONTEXT_t vosContext )
{
    pVosContextType vos_ctx = (pVosContextType)vosContext;
    VOS_STATUS status;

    VOS_TRACE(VOS_MODULE_ID_SYS, VOS_TRACE_LEVEL_INFO,
              "vos prestart");
    VOS_ASSERT( NULL != vos_ctx->pWDAContext);

    /* MAC must be pre-started before anything else is touched. */
    status = macPreStart(vos_ctx->pMACContext);
    if ( !VOS_IS_STATUS_SUCCESS(status) )
    {
        VOS_TRACE(VOS_MODULE_ID_SYS, VOS_TRACE_LEVEL_ERROR,
                  "Failed at macPreStart ");
        return VOS_STATUS_E_FAILURE;
    }

    ccmStart(vos_ctx->pMACContext);

    /* Arm the completion event before kicking WDA so the completion
       signal cannot be missed. */
    vos_event_reset(&vos_ctx->wdaCompleteEvent);

    status = WDA_preStart(vos_ctx);
    if (!VOS_IS_STATUS_SUCCESS(status))
    {
        VOS_TRACE(VOS_MODULE_ID_SYS, VOS_TRACE_LEVEL_ERROR,
                  "Failed to WDA prestart ");
        /* Unwind the MAC/CCM start performed above. */
        macStop(vos_ctx->pMACContext, HAL_STOP_TYPE_SYS_DEEP_SLEEP);
        ccmStop(vos_ctx->pMACContext);
        VOS_ASSERT(0);
        return VOS_STATUS_E_FAILURE;
    }

    /* Need to update time out of complete */
    status = vos_wait_single_event( &vos_ctx->wdaCompleteEvent, 1000);
    if ( VOS_STATUS_SUCCESS == status )
    {
        return VOS_STATUS_SUCCESS;
    }

    if ( VOS_STATUS_E_TIMEOUT == status )
    {
        VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                   "%s: Timeout occurred before WDA complete\n",__func__);
    }
    else
    {
        VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                   "%s: WDA_preStart reporting other error \n",__func__);
    }
    VOS_ASSERT( 0 );
    return VOS_STATUS_E_FAILURE;
}
/**---------------------------------------------------------------------------
\brief wlan_hdd_ftm_open() -
The function hdd_wlan_startup calls this function to initialize the FTM specific modules.
\param - pAdapter - Pointer HDD Context.
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
int wlan_hdd_ftm_open(hdd_context_t *pHddCtx)
{
    VOS_STATUS vStatus = VOS_STATUS_SUCCESS;
    pVosContextType pVosContext = NULL;
    hdd_adapter_t *pAdapter;

    VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH,
               "%s: Opening VOSS", __func__);

    pVosContext = vos_get_global_context(VOS_MODULE_ID_SYS, NULL);
    if (NULL == pVosContext)
    {
        VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
                   "%s: Trying to open VOSS without a PreOpen",__func__);
        VOS_ASSERT(0);
        goto err_vos_status_failure;
    }

    // Open VOSS
    vStatus = wlan_ftm_vos_open( pVosContext, 0);
    if ( !VOS_IS_STATUS_SUCCESS( vStatus ))
    {
        hddLog(VOS_TRACE_LEVEL_FATAL,"%s: vos_open failed",__func__);
        goto err_vos_status_failure;
    }

    /*
     For Integrated SOC, only needed to start WDA, whihc happens in wlan_hdd_ftm_start()
    */
    /* Save the hal context in Adapter */
    pHddCtx->hHal = (tHalHandle)vos_get_context(VOS_MODULE_ID_SME, pVosContext );
    if ( NULL == pHddCtx->hHal )
    {
        hddLog(VOS_TRACE_LEVEL_ERROR,"%s: HAL context is null",__func__);
        goto err_sal_close;
    }

    pAdapter = hdd_open_adapter( pHddCtx, WLAN_HDD_FTM, "wlan%d",
                                 wlan_hdd_get_intf_addr(pHddCtx), FALSE);
    if( NULL == pAdapter )
    {
        hddLog(VOS_TRACE_LEVEL_ERROR,"%s: hdd_open_adapter failed",__func__);
        goto err_adapter_open_failure;
    }

    if( wlan_ftm_register_wext(pAdapter)!= 0 )
    {
        /* Bug fixes: "%S" is the wide-string conversion -- __func__ is a
         * narrow string, so "%s" is required; and the opened adapter must
         * be unwound (the old code jumped past hdd_close_all_adapters). */
        hddLog(VOS_TRACE_LEVEL_ERROR,"%s: hdd_register_wext failed",__func__);
        goto err_adapter_open_failure;
    }

    //Initialize the nlink service
    if(nl_srv_init() != 0)
    {
        hddLog(VOS_TRACE_LEVEL_ERROR,"%s: nl_srv_init failed",__func__);
        goto err_ftm_register_wext_close;
    }

#ifdef PTT_SOCK_SVC_ENABLE
    //Initialize the PTT service
    if(ptt_sock_activate_svc(pHddCtx) != 0)
    {
        hddLog(VOS_TRACE_LEVEL_ERROR,"%s: ptt_sock_activate_svc failed",__func__);
        goto err_nl_srv_init;
    }
#endif

    if (!VOS_IS_STATUS_SUCCESS(vos_chipVoteOnXOBuffer(NULL, NULL, NULL)))
    {
        hddLog(VOS_TRACE_LEVEL_FATAL, "%s: Failed to configure 19.2 MHz Clock", __func__);
        goto err_nl_srv_init;
    }

#ifdef HDD_SESSIONIZE
    //Turn off carrier state
    netif_carrier_off(pAdapter->dev);
    //Stop the Interface TX queue. Just being safe
    netif_tx_disable(pAdapter->dev);
#endif

    /* Reset the NV-table streaming state used by get/set NV table cmds. */
    pHddCtx->ftm.processingNVTable    = NV_MAX_TABLE;
    pHddCtx->ftm.targetNVTableSize    = 0;
    pHddCtx->ftm.targetNVTablePointer = NULL;
    pHddCtx->ftm.processedNVTableSize = 0;
    pHddCtx->ftm.tempNVTableBuffer = (v_U8_t *)vos_mem_malloc(MAX_NV_TABLE_SIZE);
    if(NULL == pHddCtx->ftm.tempNVTableBuffer)
    {
        VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
                   "%s: NV Table Buffer Alloc Fail",__func__);
        VOS_ASSERT(0);
        goto err_nl_srv_init;
    }
    vos_mem_zero((v_VOID_t *)pHddCtx->ftm.tempNVTableBuffer, MAX_NV_TABLE_SIZE);

    _ftm_status_init();

    /* Initialize the ftm vos event */
    if (vos_event_init(&pHddCtx->ftm.ftm_vos_event) != VOS_STATUS_SUCCESS)
    {
        VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
                   "%s: Unable to init probeEvent",__func__);
        VOS_ASSERT(0);
        vos_mem_free(pHddCtx->ftm.tempNVTableBuffer);
        goto err_nl_srv_init;
    }

    pHddCtx->ftm.ftm_state = WLAN_FTM_INITIALIZED;
    return VOS_STATUS_SUCCESS;

/* Error unwind: each label undoes one successfully completed step and
   falls through to undo the earlier ones. */
err_nl_srv_init:
    nl_srv_exit();
err_ftm_register_wext_close:
    hdd_UnregisterWext(pAdapter->dev);
err_adapter_open_failure:
    hdd_close_all_adapters( pHddCtx );
err_sal_close:
    /* VOSS was opened above; close it so a failed open does not leak
       the VOS context resources (missing in the original). */
    wlan_ftm_vos_close(pVosContext);
err_vos_status_failure:
    return VOS_STATUS_E_FAILURE;
}
/* wlan_hdd_ftm_close() - tear down everything wlan_hdd_ftm_open() set up:
 * power the chip down, stop the nlink service, unregister and close the
 * FTM adapter, close the scheduler and VOSS, and free FTM allocations.
 * Returns 0 on success, VOS_STATUS_E_NOMEM if the FTM adapter is gone. */
int wlan_hdd_ftm_close(hdd_context_t *pHddCtx)
{
VOS_STATUS vosStatus;
v_CONTEXT_t vosContext = pHddCtx->pvosContext;
hdd_adapter_t *pAdapter = hdd_get_adapter(pHddCtx,WLAN_HDD_FTM);
ENTER();
if(pAdapter == NULL)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:pAdapter is NULL",__func__);
return VOS_STATUS_E_NOMEM;
}
//Assert Deep sleep signal now to put Libra HW in lowest power state
vosStatus = vos_chipAssertDeepSleep( NULL, NULL, NULL );
VOS_ASSERT( VOS_IS_STATUS_SUCCESS( vosStatus ) );
//Vote off any PMIC voltage supplies
vos_chipPowerDown(NULL, NULL, NULL);
/* Release the 19.2 MHz clock vote taken in wlan_hdd_ftm_open(). */
vos_chipVoteOffXOBuffer(NULL, NULL, NULL);
nl_srv_exit();
//TODO----------
//Deregister the device with the kernel
hdd_UnregisterWext(pAdapter->dev);
hdd_close_all_adapters( pHddCtx );
#if 0
if(test_bit(NET_DEVICE_REGISTERED, &pAdapter->event_flags))
{
unregister_netdev(pAdapter->dev);
clear_bit(NET_DEVICE_REGISTERED, &pAdapter->event_flags);
}
#endif
//-----------------
/* Scheduler must be closed before VOSS itself. */
vosStatus = vos_sched_close( vosContext );
if (!VOS_IS_STATUS_SUCCESS(vosStatus)) {
VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
"%s: Failed to close VOSS Scheduler",__func__);
VOS_ASSERT( VOS_IS_STATUS_SUCCESS( vosStatus ) );
}
//Close VOSS
wlan_ftm_vos_close(vosContext);
/* Event created in wlan_hdd_ftm_open(); failure is logged but not fatal. */
vosStatus = vos_event_destroy(&pHddCtx->ftm.ftm_vos_event);
if (!VOS_IS_STATUS_SUCCESS(vosStatus))
{
VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
"%s: Failed to destroy ftm_vos Event",__func__);
VOS_ASSERT( VOS_IS_STATUS_SUCCESS( vosStatus ) );
}
vos_mem_free(pHddCtx->ftm.tempNVTableBuffer);
//Free up dynamically allocated members inside HDD Adapter
kfree(pHddCtx->cfg_ini);
pHddCtx->cfg_ini= NULL;
return 0;
}
/**---------------------------------------------------------------------------
\brief wlan_ftm_send_response() -
The function sends the response to the ptt socket application running in user space.
\param - pAdapter - Pointer HDD Context.
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
/* Push the FTM response held in pHddCtx->ftm.wnl back to the user-space
 * PTT application over the netlink socket.  Returns VOS_STATUS_SUCCESS
 * when the message was queued, VOS_STATUS_E_FAILURE otherwise. */
static VOS_STATUS wlan_ftm_send_response(hdd_context_t *pHddCtx){
    int rc;

    rc = ptt_sock_send_msg_to_app(&pHddCtx->ftm.wnl->wmsg, 0,
                                  ANI_NL_MSG_PUMAC,
                                  pHddCtx->ftm.wnl->nlh.nlmsg_pid);
    if (rc < 0) {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, ("Ptt Socket error sending message to the app!!\n"));
        return VOS_STATUS_E_FAILURE;
    }

    return VOS_STATUS_SUCCESS;
}
/**---------------------------------------------------------------------------
\brief wlan_hdd_ftm_start() -
This function gets called when the FTM start commands received from the ptt socket application and
it starts the following modules.
1) SAL Start.
2) BAL Start.
3) MAC Start to download the firmware.
\param - pAdapter - Pointer HDD Context.
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
/* wlan_hdd_ftm_start() - bring the target into FTM mode: vos pre-start,
 * NV download, WDA start, then MAC start with the manufacturing driver
 * type.  Idempotent: returns success immediately if already started.
 * Returns VOS_STATUS_SUCCESS or VOS_STATUS_E_FAILURE. */
static int wlan_hdd_ftm_start(hdd_context_t *pHddCtx)
{
VOS_STATUS vStatus = VOS_STATUS_SUCCESS;
tSirRetStatus sirStatus = eSIR_SUCCESS;
pVosContextType pVosContext = (pVosContextType)(pHddCtx->pvosContext);
tHalMacStartParameters halStartParams;
/* Already started -- nothing to do. */
if (WLAN_FTM_STARTED == pHddCtx->ftm.ftm_state)
{
return VOS_STATUS_SUCCESS;
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"%s: Starting Libra SW", __func__);
/* We support only one instance for now ...*/
if (pVosContext == NULL)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: mismatch in context",__func__);
goto err_status_failure;
}
if (pVosContext->pMACContext == NULL)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: MAC NULL context",__func__);
goto err_status_failure;
}
/*
Prima needs to start the WDA correctly instead of BAL and SAL
*/
/* Vos preStart is calling */
if ( !VOS_IS_STATUS_SUCCESS(vos_ftm_preStart(pHddCtx->pvosContext) ) )
{
hddLog(VOS_TRACE_LEVEL_FATAL,"%s: vos_preStart failed",__func__);
goto err_status_failure;
}
/* Kick off the asynchronous NV download; completion is signalled on
   wdaCompleteEvent and awaited below. */
vStatus = WDA_NVDownload_Start(pVosContext);
if ( vStatus != VOS_STATUS_SUCCESS )
{
VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
"%s: Failed to start NV Download",__func__);
return VOS_STATUS_E_FAILURE;
}
/* Wait (1 s max) for the NV download to complete. */
vStatus = vos_wait_single_event(&(pVosContext->wdaCompleteEvent), 1000);
if ( vStatus != VOS_STATUS_SUCCESS )
{
if ( vStatus == VOS_STATUS_E_TIMEOUT )
{
VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
"%s: Timeout occurred before WDA_NVDownload_Start complete\n",__func__);
}
else
{
VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
"%s: WDA_NVDownload_Start reporting other error \n",__func__);
}
VOS_ASSERT(0);
goto err_wda_stop;
}
vStatus = WDA_start(pVosContext);
if (vStatus != VOS_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
"%s: Failed to start WDA",__func__);
goto err_status_failure;
}
/* Start the MAC */
vos_mem_zero((v_PVOID_t)&halStartParams, sizeof(tHalMacStartParameters));
/* eDRIVER_TYPE_MFG selects the manufacturing/factory-test firmware path. */
halStartParams.driverType = eDRIVER_TYPE_MFG;
/* Start the MAC */
sirStatus = macStart(pVosContext->pMACContext,(v_PVOID_t)&halStartParams);
if (eSIR_SUCCESS != sirStatus)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: Failed to start MAC", __func__);
goto err_wda_stop;
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"%s: MAC correctly started",__func__);
pHddCtx->ftm.ftm_state = WLAN_FTM_STARTED;
return VOS_STATUS_SUCCESS;
/* Unwind path: stop WDA and wait (bounded) for it to acknowledge. */
err_wda_stop:
vos_event_reset(&(pVosContext->wdaCompleteEvent));
WDA_stop(pVosContext, HAL_STOP_TYPE_RF_KILL);
vStatus = vos_wait_single_event(&(pVosContext->wdaCompleteEvent), 1000);
if(vStatus != VOS_STATUS_SUCCESS)
{
if(vStatus == VOS_STATUS_E_TIMEOUT)
{
VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
"%s: Timeout occurred before WDA_stop complete\n",__func__);
}
else
{
VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
"%s: WDA_stop reporting other error \n",__func__);
}
VOS_ASSERT(0);
}
err_status_failure:
return VOS_STATUS_E_FAILURE;
}
/* Stop FTM operation: bring the MAC down into deep sleep (when the HAL
 * handle is available) and stop WDA.  Requires FTM to have been started
 * first; returns WLAN_FTM_SUCCESS, or VOS_STATUS_E_FAILURE when FTM is
 * not running. */
static int wlan_ftm_stop(hdd_context_t *pHddCtx)
{
    VOS_STATUS macStatus;
    v_VOID_t *halHandle;

    if (pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm. ",__func__);
        return VOS_STATUS_E_FAILURE;
    }

    /* STOP MAC only */
    halHandle = vos_get_context( VOS_MODULE_ID_SME, pHddCtx->pvosContext );
    if (NULL == halHandle)
    {
        VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                   "%s: NULL hHal", __func__);
    }
    else
    {
        macStatus = macStop(halHandle, HAL_STOP_TYPE_SYS_DEEP_SLEEP );
        if (!VOS_IS_STATUS_SUCCESS(macStatus))
        {
            VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                       "%s: Failed to stop SYS", __func__);
            VOS_ASSERT( VOS_IS_STATUS_SUCCESS( macStatus ) );
        }
    }

    /* WDA is stopped regardless of whether the MAC stop succeeded. */
    WDA_stop(pHddCtx->pvosContext, HAL_STOP_TYPE_RF_KILL);

    return WLAN_FTM_SUCCESS;
}
/**---------------------------------------------------------------------------
\brief wlan_hdd_ftm_get_nv_table() -
Get Specific NV table
\param - ftmCmd - Pointer FTM Commad Buffer
\return - int
-1, Process Host command fail, vail out
1, Process Host command success
--------------------------------------------------------------------------*/
/* wlan_hdd_ftm_get_nv_table() - stream one chunk of the requested NV
 * table back to the PTT application.  On the first chunk the cached NV
 * table is snapshotted into tempNVTableBuffer; subsequent calls return
 * successive chunkSize slices until the whole table has been sent.
 *
 * pHddCtx - driver context; ftmCmd - PTT command/response buffer.
 * Returns 1 on success, negative errno on failure. */
int wlan_hdd_ftm_get_nv_table
(
   hdd_context_t *pHddCtx,
   tPttMsgbuffer *ftmCmd
)
{
    VOS_STATUS nvStatus = VOS_STATUS_SUCCESS;
    pttGetNvTable *nvTable = (pttGetNvTable *)&ftmCmd->msgBody.GetNvTable;
    v_SIZE_t nvSize;
    sHalNv *nvContents = NULL;

    if (NULL == pHddCtx)
    {
        VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_FATAL,
                  "Not valid driver context");
        return -EINVAL;
    }

    /* First chunk of a table: latch the table pointer/size and snapshot
       the table contents into the temp buffer. */
    if ((NV_MAX_TABLE == pHddCtx->ftm.processingNVTable) ||
        (0 == pHddCtx->ftm.processedNVTableSize))
    {
        nvStatus = vos_nv_getNVBuffer((void **)&nvContents, &nvSize);
        if ((VOS_STATUS_SUCCESS != nvStatus) || (NULL == nvContents))
        {
            VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_FATAL,
                       "Fail to get cached NV value Status %d", nvStatus);
            return -EIO;
        }
        switch (nvTable->nvTable)
        {
            case NV_TABLE_RATE_POWER_SETTINGS:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.pwrOptimum);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.pwrOptimum;
                break;
            case NV_TABLE_REGULATORY_DOMAINS:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.regDomains);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.regDomains;
                break;
            case NV_TABLE_DEFAULT_COUNTRY:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.defaultCountryTable);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.defaultCountryTable;
                break;
            case NV_TABLE_TPC_POWER_TABLE:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.plutCharacterized);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.plutCharacterized[0];
                break;
            case NV_TABLE_TPC_PDADC_OFFSETS:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.plutPdadcOffset);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.plutPdadcOffset[0];
                break;
            case NV_TABLE_VIRTUAL_RATE:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.pwrOptimum_virtualRate);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.pwrOptimum_virtualRate[0];
                break;
            case NV_TABLE_RSSI_CHANNEL_OFFSETS:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.rssiChanOffsets);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.rssiChanOffsets[0];
                break;
            case NV_TABLE_HW_CAL_VALUES:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.hwCalValues);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.hwCalValues;
                break;
            case NV_TABLE_FW_CONFIG:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.fwConfig);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.fwConfig;
                break;
            case NV_TABLE_ANTENNA_PATH_LOSS:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.antennaPathLoss);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.antennaPathLoss[0];
                break;
            case NV_TABLE_PACKET_TYPE_POWER_LIMITS:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.pktTypePwrLimits);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.pktTypePwrLimits[0][0];
                break;
            default:
                VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                           "Not Valid NV Table %d", nvTable->nvTable);
                return -EIO;
                break;
        }

        if (pHddCtx->ftm.targetNVTableSize != nvTable->tableSize)
        {
            /* Invalid table size, discard and initialize data */
            VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_FATAL,
                       "Invalid Table Size %d for Table %d"
                       " expected size %d\n", nvTable->tableSize, nvTable->nvTable,
                       pHddCtx->ftm.targetNVTableSize);
            pHddCtx->ftm.processingNVTable = NV_MAX_TABLE;
            pHddCtx->ftm.targetNVTableSize = 0;
            pHddCtx->ftm.processedNVTableSize = 0;
            vos_mem_zero(pHddCtx->ftm.tempNVTableBuffer, MAX_NV_TABLE_SIZE);
            return -EINVAL;
        }

        /* Set Current Processing NV table type */
        pHddCtx->ftm.processingNVTable = nvTable->nvTable;
        /* Copy target NV table value into temp context buffer */
        vos_mem_copy(pHddCtx->ftm.tempNVTableBuffer,
                     pHddCtx->ftm.targetNVTablePointer,
                     pHddCtx->ftm.targetNVTableSize);
    }

    if (pHddCtx->ftm.processingNVTable != nvTable->nvTable)
    {
        /* Invalid table type */
        VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_FATAL,
                   "Invalid NV Table, now Processing %d, not %d",
                   pHddCtx->ftm.processingNVTable, nvTable->nvTable);
        pHddCtx->ftm.processingNVTable = NV_MAX_TABLE;
        pHddCtx->ftm.targetNVTableSize = 0;
        pHddCtx->ftm.processedNVTableSize = 0;
        vos_mem_zero(pHddCtx->ftm.tempNVTableBuffer, MAX_NV_TABLE_SIZE);
        return -EINVAL;
    }

    /* Security fix: chunkSize comes from user space and was previously
       copied unchecked, allowing a read past the staged table data. */
    if (nvTable->chunkSize >
        (pHddCtx->ftm.targetNVTableSize - pHddCtx->ftm.processedNVTableSize))
    {
        VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_FATAL,
                   "Invalid chunk size %d, remaining %d",
                   nvTable->chunkSize,
                   pHddCtx->ftm.targetNVTableSize - pHddCtx->ftm.processedNVTableSize);
        pHddCtx->ftm.processingNVTable = NV_MAX_TABLE;
        pHddCtx->ftm.targetNVTableSize = 0;
        pHddCtx->ftm.processedNVTableSize = 0;
        vos_mem_zero(pHddCtx->ftm.tempNVTableBuffer, MAX_NV_TABLE_SIZE);
        return -EINVAL;
    }

    /* Copy next chunk of NV table value into response buffer */
    vos_mem_copy(&nvTable->tableData,
                 pHddCtx->ftm.tempNVTableBuffer + pHddCtx->ftm.processedNVTableSize,
                 nvTable->chunkSize);
    /* Update processed pointer to prepare next chunk copy */
    pHddCtx->ftm.processedNVTableSize += nvTable->chunkSize;

    if (pHddCtx->ftm.targetNVTableSize == pHddCtx->ftm.processedNVTableSize)
    {
        /* Finished to process last chunk of data, initialize buffer */
        pHddCtx->ftm.processingNVTable = NV_MAX_TABLE;
        pHddCtx->ftm.targetNVTableSize = 0;
        pHddCtx->ftm.processedNVTableSize = 0;
        vos_mem_zero(pHddCtx->ftm.tempNVTableBuffer, MAX_NV_TABLE_SIZE);
    }

    return 1;
}
/**---------------------------------------------------------------------------
\brief wlan_hdd_ftm_set_nv_table() -
Set Specific NV table as given
\param - ftmCmd - Pointer FTM Commad Buffer
\return - int
-1, Process Host command fail, vail out
1, Process Host command success
--------------------------------------------------------------------------*/
/* wlan_hdd_ftm_set_nv_table() - accept one chunk of an NV table from
 * the PTT application.  Chunks are accumulated in tempNVTableBuffer and
 * committed to the cached NV contents once the full table has arrived.
 *
 * pHddCtx - driver context; ftmCmd - PTT command buffer.
 * Returns 1 on success, negative errno on failure. */
int wlan_hdd_ftm_set_nv_table
(
   hdd_context_t *pHddCtx,
   tPttMsgbuffer *ftmCmd
)
{
    VOS_STATUS nvStatus = VOS_STATUS_SUCCESS;
    pttSetNvTable *nvTable = (pttSetNvTable *)&ftmCmd->msgBody.SetNvTable;
    v_SIZE_t nvSize;
    sHalNv *nvContents = NULL;

    if (NULL == pHddCtx)
    {
        VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_FATAL,
                  "Not valid driver context");
        return -EINVAL;
    }

    /* First chunk of a table: latch the target table pointer/size. */
    if ((NV_MAX_TABLE == pHddCtx->ftm.processingNVTable) ||
        (0 == pHddCtx->ftm.processedNVTableSize))
    {
        nvStatus = vos_nv_getNVBuffer((void **)&nvContents, &nvSize);
        if ((VOS_STATUS_SUCCESS != nvStatus) || (NULL == nvContents))
        {
            VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_FATAL,
                       "Fail to get cached NV value Status %d", nvStatus);
            return -EINVAL;
        }
        switch (nvTable->nvTable)
        {
            case NV_TABLE_RATE_POWER_SETTINGS:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.pwrOptimum);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.pwrOptimum;
                break;
            case NV_TABLE_REGULATORY_DOMAINS:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.regDomains);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.regDomains;
                break;
            case NV_TABLE_DEFAULT_COUNTRY:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.defaultCountryTable);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.defaultCountryTable;
                break;
            case NV_TABLE_TPC_POWER_TABLE:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.plutCharacterized);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.plutCharacterized[0];
                break;
            case NV_TABLE_TPC_PDADC_OFFSETS:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.plutPdadcOffset);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.plutPdadcOffset[0];
                break;
            case NV_TABLE_VIRTUAL_RATE:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.pwrOptimum_virtualRate);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.pwrOptimum_virtualRate[0];
                break;
            case NV_TABLE_RSSI_CHANNEL_OFFSETS:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.rssiChanOffsets);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.rssiChanOffsets[0];
                break;
            case NV_TABLE_HW_CAL_VALUES:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.hwCalValues);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.hwCalValues;
                break;
            case NV_TABLE_FW_CONFIG:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.fwConfig);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.fwConfig;
                break;
            case NV_TABLE_ANTENNA_PATH_LOSS:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.antennaPathLoss);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.antennaPathLoss[0];
                break;
            case NV_TABLE_PACKET_TYPE_POWER_LIMITS:
                pHddCtx->ftm.targetNVTableSize = sizeof(nvContents->tables.pktTypePwrLimits);
                pHddCtx->ftm.targetNVTablePointer = (v_U8_t *)&nvContents->tables.pktTypePwrLimits[0][0];
                break;
            default:
                VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                           "Not Valid NV Table %d", nvTable->nvTable);
                return -EIO;
                break;
        }

        /* Set Current Processing NV table type */
        pHddCtx->ftm.processingNVTable = nvTable->nvTable;

        if (pHddCtx->ftm.targetNVTableSize != nvTable->tableSize)
        {
            VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_FATAL,
                       "Invalid Table Size %d", nvTable->tableSize);
            pHddCtx->ftm.processingNVTable = NV_MAX_TABLE;
            pHddCtx->ftm.targetNVTableSize = 0;
            pHddCtx->ftm.processedNVTableSize = 0;
            vos_mem_zero(pHddCtx->ftm.tempNVTableBuffer, MAX_NV_TABLE_SIZE);
            return -EINVAL;
        }
    }

    if (pHddCtx->ftm.processingNVTable != nvTable->nvTable)
    {
        VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                   "Invalid NV Table, now Processing %d, not %d",
                   pHddCtx->ftm.processingNVTable, nvTable->nvTable);
        pHddCtx->ftm.processingNVTable = NV_MAX_TABLE;
        pHddCtx->ftm.targetNVTableSize = 0;
        pHddCtx->ftm.processedNVTableSize = 0;
        vos_mem_zero(pHddCtx->ftm.tempNVTableBuffer, MAX_NV_TABLE_SIZE);
        return -EINVAL;
    }

    /* Security fix: chunkSize comes from user space and was previously
       copied unchecked, allowing a heap overflow of the
       MAX_NV_TABLE_SIZE staging buffer. */
    if (nvTable->chunkSize >
        (pHddCtx->ftm.targetNVTableSize - pHddCtx->ftm.processedNVTableSize))
    {
        VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_FATAL,
                   "Invalid chunk size %d, remaining %d",
                   nvTable->chunkSize,
                   pHddCtx->ftm.targetNVTableSize - pHddCtx->ftm.processedNVTableSize);
        pHddCtx->ftm.processingNVTable = NV_MAX_TABLE;
        pHddCtx->ftm.targetNVTableSize = 0;
        pHddCtx->ftm.processedNVTableSize = 0;
        vos_mem_zero(pHddCtx->ftm.tempNVTableBuffer, MAX_NV_TABLE_SIZE);
        return -EINVAL;
    }

    /* Stage this chunk; commit only when the whole table has arrived. */
    vos_mem_copy(pHddCtx->ftm.tempNVTableBuffer + pHddCtx->ftm.processedNVTableSize,
                 &nvTable->tableData,
                 nvTable->chunkSize);
    pHddCtx->ftm.processedNVTableSize += nvTable->chunkSize;

    if (pHddCtx->ftm.targetNVTableSize == pHddCtx->ftm.processedNVTableSize)
    {
        vos_mem_copy(pHddCtx->ftm.targetNVTablePointer,
                     pHddCtx->ftm.tempNVTableBuffer,
                     pHddCtx->ftm.targetNVTableSize);
        pHddCtx->ftm.processingNVTable = NV_MAX_TABLE;
        pHddCtx->ftm.targetNVTableSize = 0;
        pHddCtx->ftm.processedNVTableSize = 0;
        vos_mem_zero(pHddCtx->ftm.tempNVTableBuffer, MAX_NV_TABLE_SIZE);
    }

    return 1;
}
/**---------------------------------------------------------------------------
\brief wlan_hdd_ftm_blank_nv() -
Set all NV table value as default
\param - ftmCmd - Pointer FTM Commad Buffer
\return - int
-1, Process Host command fail, vail out
0, Process Host command success
--------------------------------------------------------------------------*/
/* wlan_hdd_ftm_blank_nv_table() - reset the cached NV tables to the
 * compiled-in defaults (nvDefaults).  The ftmCmd argument is part of
 * the dispatch interface and is not consumed here.
 * Returns 1 on success, -EIO if the cached NV buffer is unavailable. */
int wlan_hdd_ftm_blank_nv_table
(
   tPttMsgbuffer *ftmCmd
)
{
    VOS_STATUS nvStatus = VOS_STATUS_SUCCESS;
    v_SIZE_t nvSize;
    sHalNv *nvContents = NULL;

    nvStatus = vos_nv_getNVBuffer((void **)&nvContents, &nvSize);
    if ((VOS_STATUS_SUCCESS != nvStatus) || (NULL == nvContents))
    {
        VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_INFO,
                   "Fail to get cached NV value Status %d", nvStatus);
        return -EIO;
    }

    /* Restore each table member from the defaults; the macro keeps the
       source, destination and size in lock-step. */
#define FTM_RESTORE_NV_TABLE(member) \
    memcpy(&nvContents->tables.member, &nvDefaults.tables.member, \
           sizeof(nvContents->tables.member))

    FTM_RESTORE_NV_TABLE(pwrOptimum);
    FTM_RESTORE_NV_TABLE(regDomains);
    FTM_RESTORE_NV_TABLE(defaultCountryTable);
    FTM_RESTORE_NV_TABLE(plutCharacterized);
    FTM_RESTORE_NV_TABLE(plutPdadcOffset);
    FTM_RESTORE_NV_TABLE(pwrOptimum_virtualRate);
    FTM_RESTORE_NV_TABLE(rssiChanOffsets);
    FTM_RESTORE_NV_TABLE(hwCalValues);
    FTM_RESTORE_NV_TABLE(antennaPathLoss);
    FTM_RESTORE_NV_TABLE(pktTypePwrLimits);

#undef FTM_RESTORE_NV_TABLE

    return 1;
}
/**---------------------------------------------------------------------------
\brief wlan_hdd_ftm_delete_nv_table() -
Delete Specific NV table
\param - ftmCmd - Pointer FTM Commad Buffer
\return - int
-1, Process Host command fail, vail out
1, Process Host command success
--------------------------------------------------------------------------*/
/* wlan_hdd_ftm_delete_nv_table() - restore a single NV table, selected
 * by ftmCmd->msgBody.DelNvTable.nvTable, to its compiled-in default.
 * Returns 1 on success, -EIO on an unknown table id or when the cached
 * NV buffer is unavailable. */
int wlan_hdd_ftm_delete_nv_table
(
   tPttMsgbuffer *ftmCmd
)
{
    VOS_STATUS nvStatus = VOS_STATUS_SUCCESS;
    tMsgPttDelNvTable *nvTable = (tMsgPttDelNvTable *)&ftmCmd->msgBody.DelNvTable;
    v_SIZE_t nvSize;
    sHalNv *nvContents = NULL;

    nvStatus = vos_nv_getNVBuffer((void **)&nvContents, &nvSize);
    if ((VOS_STATUS_SUCCESS != nvStatus) || (NULL == nvContents))
    {
        VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_INFO,
                   "Fail to get cached NV value Status %d", nvStatus);
        return -EIO;
    }

    /* One table per command id; the macro keeps source, destination and
       size in lock-step. */
#define FTM_RESET_NV_TABLE(member) \
    memcpy(&nvContents->tables.member, &nvDefaults.tables.member, \
           sizeof(nvContents->tables.member))

    switch (nvTable->nvTable)
    {
        case NV_TABLE_RATE_POWER_SETTINGS:
            FTM_RESET_NV_TABLE(pwrOptimum);
            break;
        case NV_TABLE_REGULATORY_DOMAINS:
            FTM_RESET_NV_TABLE(regDomains);
            break;
        case NV_TABLE_DEFAULT_COUNTRY:
            FTM_RESET_NV_TABLE(defaultCountryTable);
            break;
        case NV_TABLE_TPC_POWER_TABLE:
            FTM_RESET_NV_TABLE(plutCharacterized);
            break;
        case NV_TABLE_TPC_PDADC_OFFSETS:
            FTM_RESET_NV_TABLE(plutPdadcOffset);
            break;
        case NV_TABLE_VIRTUAL_RATE:
            FTM_RESET_NV_TABLE(pwrOptimum_virtualRate);
            break;
        case NV_TABLE_RSSI_CHANNEL_OFFSETS:
            FTM_RESET_NV_TABLE(rssiChanOffsets);
            break;
        case NV_TABLE_HW_CAL_VALUES:
            FTM_RESET_NV_TABLE(hwCalValues);
            break;
        case NV_TABLE_FW_CONFIG:
            FTM_RESET_NV_TABLE(fwConfig);
            break;
        case NV_TABLE_ANTENNA_PATH_LOSS:
            FTM_RESET_NV_TABLE(antennaPathLoss);
            break;
        case NV_TABLE_PACKET_TYPE_POWER_LIMITS:
            FTM_RESET_NV_TABLE(pktTypePwrLimits);
            break;
        default:
            VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                       "Not Valid NV Table %d", nvTable->nvTable);
            return -EIO;
            break;
    }

#undef FTM_RESET_NV_TABLE

    return 1;
}
/**---------------------------------------------------------------------------
\brief wlan_hdd_ftm_get_nv_field() -
Get Specific NV field
\param - ftmCmd - Pointer FTM Commad Buffer
\return - int
-1, Process Host command fail, vail out
1, Process Host command success
--------------------------------------------------------------------------*/
/* wlan_hdd_ftm_get_nv_field() - copy one NV field (selected by
 * ftmCmd->msgBody.GetNvField.nvField) from the cached NV contents into
 * the command's fieldData for return to the PTT application.
 * Returns 1 on success, -EIO on an unknown field or NV read failure. */
int wlan_hdd_ftm_get_nv_field
(
tPttMsgbuffer *ftmCmd
)
{
sNvFields nvFieldDataBuffer;
tMsgPttGetNvField *nvField = (tMsgPttGetNvField *)&ftmCmd->msgBody.GetNvField;
VOS_STATUS nvStatus = VOS_STATUS_SUCCESS;
sHalNv *nvContents = NULL;
v_SIZE_t nvSize;
nvStatus = vos_nv_getNVBuffer((void **)&nvContents, &nvSize);
if ((VOS_STATUS_SUCCESS != nvStatus) || (NULL == nvContents))
{
VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_INFO,
"Fail to get cached NV value Status %d", nvStatus);
return -EIO;
}
/* Work on a local snapshot of the fields block rather than the cache. */
memcpy(&nvFieldDataBuffer, &nvContents->fields, sizeof(sNvFields));
switch (nvField->nvField)
{
case NV_COMMON_PRODUCT_ID:
memcpy((void *)&nvField->fieldData,
&nvFieldDataBuffer.productId,
sizeof(nvFieldDataBuffer.productId));
break;
case NV_COMMON_PRODUCT_BANDS:
memcpy((void *)&nvField->fieldData,
&nvFieldDataBuffer.productBands,
sizeof(nvFieldDataBuffer.productBands));
break;
case NV_COMMON_NUM_OF_TX_CHAINS:
memcpy((void *)&nvField->fieldData,
&nvFieldDataBuffer.numOfTxChains,
sizeof(nvFieldDataBuffer.numOfTxChains));
break;
case NV_COMMON_NUM_OF_RX_CHAINS:
memcpy((void *)&nvField->fieldData,
&nvFieldDataBuffer.numOfRxChains,
sizeof(nvFieldDataBuffer.numOfRxChains));
break;
case NV_COMMON_MAC_ADDR:
/* Only the first of the per-persona MAC slots is returned. */
memcpy((void *)&nvField->fieldData,
&nvFieldDataBuffer.macAddr[0],
NV_FIELD_MAC_ADDR_SIZE);
break;
case NV_COMMON_MFG_SERIAL_NUMBER:
memcpy((void *)&nvField->fieldData,
&nvFieldDataBuffer.mfgSN[0],
NV_FIELD_MFG_SN_SIZE);
break;
case NV_COMMON_WLAN_NV_REV_ID:
memcpy((void *)&nvField->fieldData,
&nvFieldDataBuffer.wlanNvRevId,
sizeof(nvFieldDataBuffer.wlanNvRevId));
break;
case NV_COMMON_COUPLER_TYPE:
memcpy((void *)&nvField->fieldData,
&nvFieldDataBuffer.couplerType,
sizeof(nvFieldDataBuffer.couplerType));
break;
case NV_COMMON_NV_VERSION:
memcpy((void *)&nvField->fieldData,
&nvFieldDataBuffer.nvVersion,
sizeof(nvFieldDataBuffer.nvVersion));
break;
default:
VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
"Not Valid NV field %d", nvField->nvField);
return -EIO;
break;
}
return 1;
}
/**---------------------------------------------------------------------------
\brief wlan_hdd_ftm_set_nv_field() -
Set Specific NV field
\param - ftmCmd - Pointer FTM Commad Buffer
\return - int
-1, Process Host command fail, vail out
1, Process Host command success
--------------------------------------------------------------------------*/
int wlan_hdd_ftm_set_nv_field
(
tPttMsgbuffer *ftmCmd
)
{
/* Write one field of the RAM-cached NV image (obtained via
 * vos_nv_getNVBuffer).  The change is only made to the cached copy;
 * persisting to storage is done separately (see wlan_hdd_ftm_store_nv_table).
 * Returns 1 on success, -EIO / -EILSEQ on failure. */
tMsgPttSetNvField *nvField = (tMsgPttSetNvField *)&ftmCmd->msgBody.SetNvField;
VOS_STATUS nvStatus = VOS_STATUS_SUCCESS;
v_SIZE_t nvSize;
sHalNv *nvContents = NULL;
v_U8_t macLoop;
v_U8_t *pNVMac;
v_U8_t lastByteMAC;
nvStatus = vos_nv_getNVBuffer((void **)&nvContents, &nvSize);
if((VOS_STATUS_SUCCESS != nvStatus) || (NULL == nvContents))
{
VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_INFO,
"Fail to get cached NV value Status %d", nvStatus);
return -EIO;
}
/* For each simple field, copy sizeof(destination field) bytes from the
 * start of the request's fieldData.  fieldData is presumably a union of
 * the per-field layouts -- TODO confirm against tMsgPttSetNvField. */
switch (nvField->nvField)
{
case NV_COMMON_PRODUCT_ID:
memcpy(&nvContents->fields.productId,
&nvField->fieldData,
sizeof(nvContents->fields.productId));
break;
case NV_COMMON_PRODUCT_BANDS:
memcpy(&nvContents->fields.productBands,
&nvField->fieldData,
sizeof(nvContents->fields.productBands));
break;
case NV_COMMON_NUM_OF_TX_CHAINS:
memcpy(&nvContents->fields.numOfTxChains,
&nvField->fieldData,
sizeof(nvContents->fields.numOfTxChains));
break;
case NV_COMMON_NUM_OF_RX_CHAINS:
memcpy(&nvContents->fields.numOfRxChains,
&nvField->fieldData,
sizeof(nvContents->fields.numOfRxChains));
break;
case NV_COMMON_MAC_ADDR:
/* If Last byte is larger than 252 (0xFC), return Error,
 * Since 3MACs should be derived from first MAC */
if(QWLAN_MAX_MAC_LAST_BYTE_VALUE <
nvField->fieldData.macAddr[VOS_MAC_ADDRESS_LEN - 1])
{
VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
"Last Byte of the seed MAC is too large 0x%x",
nvField->fieldData.macAddr[VOS_MAC_ADDRESS_LEN - 1]);
return -EILSEQ;
}
/* Derive VOS_MAX_CONCURRENCY_PERSONA MAC addresses from the seed MAC
 * by incrementing its last byte (the range check above guarantees no
 * byte overflow), storing them consecutively in the cached NV image.
 * NOTE(review): the loop rewrites the last byte of the caller's
 * nvField->fieldData.macAddr in place and never restores it; confirm
 * no caller reuses the request buffer after this call. */
pNVMac = (v_U8_t *)nvContents->fields.macAddr;
lastByteMAC = nvField->fieldData.macAddr[VOS_MAC_ADDRESS_LEN - 1];
for(macLoop = 0; macLoop < VOS_MAX_CONCURRENCY_PERSONA; macLoop++)
{
nvField->fieldData.macAddr[VOS_MAC_ADDRESS_LEN - 1] =
lastByteMAC + macLoop;
vos_mem_copy(pNVMac + (macLoop * NV_FIELD_MAC_ADDR_SIZE),
&nvField->fieldData.macAddr[0],
NV_FIELD_MAC_ADDR_SIZE);
}
break;
case NV_COMMON_MFG_SERIAL_NUMBER:
memcpy(&nvContents->fields.mfgSN[0],
&nvField->fieldData,
NV_FIELD_MFG_SN_SIZE);
break;
case NV_COMMON_WLAN_NV_REV_ID:
memcpy(&nvContents->fields.wlanNvRevId,
&nvField->fieldData,
sizeof(nvContents->fields.wlanNvRevId));
break;
case NV_COMMON_COUPLER_TYPE:
memcpy(&nvContents->fields.couplerType,
&nvField->fieldData,
sizeof(nvContents->fields.couplerType));
break;
case NV_COMMON_NV_VERSION:
/* The NV format revision is owned by the driver; reject writes. */
VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
"Cannot modify NV version field %d", nvField->nvField);
return -EIO;
break;
default:
VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
"Not Valid NV field %d", nvField->nvField);
return -EIO;
break;
}
return 1;
}
/**---------------------------------------------------------------------------
\brief wlan_hdd_ftm_store_nv_table() -
Store Cached NV information into Flash Memory, file
\param - ftmCmd - Pointer to FTM Command Buffer
\return - int
negative, Process Host command fail, bail out
1, Process Host command success
--------------------------------------------------------------------------*/
/*
 * Persist one region of the RAM-cached NV image (either the fields image or
 * a single calibration/config table) into NV storage via vos_nv_write().
 * Returns 1 on success, -EIO on any failure.
 */
int wlan_hdd_ftm_store_nv_table
(
tPttMsgbuffer *ftmCmd
)
{
    tMsgPttStoreNvTable *pStoreReq =
        (tMsgPttStoreNvTable *)&ftmCmd->msgBody.StoreNvTable;
    sHalNv *nvContents = NULL;
    v_SIZE_t nvSize;
    VOS_STATUS vStatus;
    void *blobPtr = NULL;             /* start of the region to persist */
    unsigned int blobLen = 0;         /* length of that region in bytes */
    VNV_TYPE vnvId = VNV_FIELD_IMAGE; /* NV storage id for the region */

    vStatus = vos_nv_getNVBuffer((void **)&nvContents, &nvSize);
    if ((VOS_STATUS_SUCCESS != vStatus) || (NULL == nvContents))
    {
        return -EIO;
    }

    /* Set Platform type as PRIMA */
    nvContents->fields.wlanNvRevId = 2;

    /* Map the requested table id to (storage id, pointer, length). */
    switch (pStoreReq->nvTable)
    {
        case NV_FIELDS_IMAGE:
            vnvId = VNV_FIELD_IMAGE;
            blobPtr = (void *)&nvContents->fields;
            blobLen = sizeof(nvContents->fields);
            break;
        case NV_TABLE_RATE_POWER_SETTINGS:
            vnvId = VNV_RATE_TO_POWER_TABLE;
            blobPtr = (void *)&nvContents->tables.pwrOptimum[0];
            blobLen = sizeof(nvContents->tables.pwrOptimum);
            break;
        case NV_TABLE_REGULATORY_DOMAINS:
            vnvId = VNV_REGULARTORY_DOMAIN_TABLE;
            blobPtr = (void *)&nvContents->tables.regDomains[0];
            blobLen = sizeof(nvContents->tables.regDomains);
            break;
        case NV_TABLE_DEFAULT_COUNTRY:
            vnvId = VNV_DEFAULT_LOCATION;
            blobPtr = (void *)&nvContents->tables.defaultCountryTable;
            blobLen = sizeof(nvContents->tables.defaultCountryTable);
            break;
        case NV_TABLE_TPC_POWER_TABLE:
            vnvId = VNV_TPC_POWER_TABLE;
            blobPtr = (void *)&nvContents->tables.plutCharacterized[0];
            blobLen = sizeof(nvContents->tables.plutCharacterized);
            break;
        case NV_TABLE_TPC_PDADC_OFFSETS:
            vnvId = VNV_TPC_PDADC_OFFSETS;
            blobPtr = (void *)&nvContents->tables.plutPdadcOffset[0];
            blobLen = sizeof(nvContents->tables.plutPdadcOffset);
            break;
        case NV_TABLE_VIRTUAL_RATE:
            vnvId = VNV_TABLE_VIRTUAL_RATE;
            blobPtr = (void *)&nvContents->tables.pwrOptimum_virtualRate[0];
            blobLen = sizeof(nvContents->tables.pwrOptimum_virtualRate);
            break;
        case NV_TABLE_RSSI_CHANNEL_OFFSETS:
            vnvId = VNV_RSSI_CHANNEL_OFFSETS;
            blobPtr = (void *)&nvContents->tables.rssiChanOffsets[0];
            blobLen = sizeof(nvContents->tables.rssiChanOffsets);
            break;
        case NV_TABLE_HW_CAL_VALUES:
            vnvId = VNV_HW_CAL_VALUES;
            blobPtr = (void *)&nvContents->tables.hwCalValues;
            blobLen = sizeof(nvContents->tables.hwCalValues);
            break;
        case NV_TABLE_FW_CONFIG:
            vnvId = VNV_FW_CONFIG;
            blobPtr = (void *)&nvContents->tables.fwConfig;
            blobLen = sizeof(nvContents->tables.fwConfig);
            break;
        case NV_TABLE_ANTENNA_PATH_LOSS:
            vnvId = VNV_ANTENNA_PATH_LOSS;
            blobPtr = (void *)&nvContents->tables.antennaPathLoss[0];
            blobLen = sizeof(nvContents->tables.antennaPathLoss);
            break;
        case NV_TABLE_PACKET_TYPE_POWER_LIMITS:
            vnvId = VNV_PACKET_TYPE_POWER_LIMITS;
            blobPtr = (void *)&nvContents->tables.pktTypePwrLimits[0][0];
            blobLen = sizeof(nvContents->tables.pktTypePwrLimits);
            break;
        default:
            VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                "Not Supported Table Type %d", pStoreReq->nvTable);
            return -EIO;
    }

    vStatus = vos_nv_write(vnvId, blobPtr, blobLen);
    if (VOS_STATUS_SUCCESS != vStatus)
    {
        return -EIO;
    }
    return 1;
}
/**---------------------------------------------------------------------------
\brief wlan_hdd_ftm_temp_get_rel_num() -
Get internal release number
\param - ftmCmd - Pointer to FTM Command Buffer
\return - int
negative, Process Host command fail, bail out
1, Process Host command success
--------------------------------------------------------------------------*/
/*
 * Fill the GetBuildReleaseNumber response with the driver version macros
 * and the PTT protocol version range supported by this host.  Always
 * returns 1 (host handled the command).
 */
int wlan_hdd_ftm_temp_get_rel_num
(
tPttMsgbuffer *ftmCmd
)
{
    tMsgPttGetBuildReleaseNumber *pRelInfo =
        (tMsgPttGetBuildReleaseNumber *)&ftmCmd->msgBody.GetBuildReleaseNumber;

    /* Driver version quadruple (major.minor.patch.build). */
    pRelInfo->relParams.drvMjr  = QWLAN_VERSION_MAJOR;
    pRelInfo->relParams.drvMnr  = QWLAN_VERSION_MINOR;
    pRelInfo->relParams.drvPtch = QWLAN_VERSION_PATCH;
    pRelInfo->relParams.drvBld  = QWLAN_VERSION_BUILD;
    /* Supported PTT protocol version range. */
    pRelInfo->relParams.pttMin  = 1;
    pRelInfo->relParams.pttMax  = 10;
    return 1;
}
/**---------------------------------------------------------------------------
\brief wlan_hdd_process_ftm_host_cmd() -
process any command should be handled within host.
decide any command should be send to HAL or not
\param - ftmCmd - Pointer to FTM Command Buffer
\return - int
< 0, Process Host command fail, bail out
0, Process Host command success, not need to send CMD to HAL
1, Process Host command success, need to send CMD to HAL
--------------------------------------------------------------------------*/
/*
 * Dispatch an FTM/PTT command to the handler the host can service locally,
 * and decide whether the command must still be forwarded to HAL.
 * Returns a negative value when the host-side handler failed, 0 when the
 * command was fully handled on the host, and 1 when HAL routing is needed.
 */
int wlan_hdd_process_ftm_host_cmd
(
hdd_context_t *pHddCtx,
void *ftmCmd
)
{
    tPttMsgbuffer *pMsg = (tPttMsgbuffer *)ftmCmd;
    int rc = 1;          /* result of the host-side handler */
    int routeToHal = 1;  /* 1 => forward the command to HAL afterwards */

    switch (pMsg->msgId)
    {
        case PTT_MSG_GET_NV_TABLE:
            rc = wlan_hdd_ftm_get_nv_table(pHddCtx, pMsg);
            routeToHal = 0;
            break;
        case PTT_MSG_SET_NV_TABLE:
            rc = wlan_hdd_ftm_set_nv_table(pHddCtx, pMsg);
            /* Temp NV Operation will be isolated to host
               needToRouteHal = 1; */
            routeToHal = 0;
            break;
        case PTT_MSG_BLANK_NV:
            rc = wlan_hdd_ftm_blank_nv_table(pMsg);
            routeToHal = 1;
            break;
        case PTT_MSG_DEL_NV_TABLE:
            rc = wlan_hdd_ftm_delete_nv_table(pMsg);
            routeToHal = 1;
            break;
        case PTT_MSG_GET_NV_FIELD:
            rc = wlan_hdd_ftm_get_nv_field(pMsg);
            routeToHal = 0;
            break;
        case PTT_MSG_SET_NV_FIELD:
            rc = wlan_hdd_ftm_set_nv_field(pMsg);
            routeToHal = 0;
            break;
        case PTT_MSG_STORE_NV_TABLE:
            rc = wlan_hdd_ftm_store_nv_table(pMsg);
            routeToHal = 0;
            break;
        case PTT_MSG_DBG_READ_REGISTER:
            wpalReadRegister(pMsg->msgBody.DbgReadRegister.regAddr,
                             &pMsg->msgBody.DbgReadRegister.regValue);
            routeToHal = 0;
            break;
        case PTT_MSG_DBG_WRITE_REGISTER:
            wpalWriteRegister(pMsg->msgBody.DbgWriteRegister.regAddr,
                              pMsg->msgBody.DbgWriteRegister.regValue);
            routeToHal = 0;
            break;
        case PTT_MSG_DBG_READ_MEMORY:
            wpalReadDeviceMemory(pMsg->msgBody.DbgReadMemory.memAddr,
                                 (unsigned char *)pMsg->msgBody.DbgReadMemory.pMemBuf,
                                 pMsg->msgBody.DbgReadMemory.nBytes);
            routeToHal = 0;
            break;
        case PTT_MSG_DBG_WRITE_MEMORY:
            wpalWriteDeviceMemory(pMsg->msgBody.DbgWriteMemory.memAddr,
                                  (unsigned char *)pMsg->msgBody.DbgWriteMemory.pMemBuf,
                                  pMsg->msgBody.DbgWriteMemory.nBytes);
            routeToHal = 0;
            break;
        case PTT_MSG_GET_BUILD_RELEASE_NUMBER:
            wlan_hdd_ftm_temp_get_rel_num(pMsg);
            routeToHal = 0;
            break;
        default:
            /* Unknown to the host: let HAL deal with it. */
            routeToHal = 1;
            break;
    }

    if( 0 > rc)
    {
        VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
            "Host Command Handle Fail, Bailout");
        return rc;
    }
    return routeToHal;
}
/**---------------------------------------------------------------------------
\brief wlan_hdd_process_ftm_cmd() -
This function process the commands received from the ptt socket application.
\param - pHddCtx - Pointer to HDD Context.
\param - wnl - Pointer to the ANI netlink header.
\return - none
--------------------------------------------------------------------------*/
void wlan_hdd_process_ftm_cmd
(
hdd_context_t *pHddCtx,
tAniNlHdr *wnl
)
{
/* Top-level FTM command processor for requests arriving from the PTT
 * socket application: handles FTM start/stop and routes WLAN_FTM_CMD
 * payloads either to host-side handlers or to HAL, then sends the
 * response back over netlink.
 * The request payload sits immediately after the tAniHdr inside wnl. */
wlan_hdd_ftm_request_t *pRequestBuf = (wlan_hdd_ftm_request_t*)(((v_U8_t*)(&wnl->wmsg))+sizeof(tAniHdr)) ;
v_U16_t cmd_len;
v_U8_t *pftm_data;
pVosContextType pVosContext = (pVosContextType)(pHddCtx->pvosContext);
int hostState;
tPttMsgbuffer *tempRspBuffer = NULL;
ENTER();
//Delay to fix NV write failure on JB
vos_busy_wait(10000); //10ms
if (!pRequestBuf) {
hddLog(VOS_TRACE_LEVEL_ERROR,"%s: request buffer is null\n",__func__);
return ;
}
/*Save the received request*/
pHddCtx->ftm.pRequestBuf = pRequestBuf;
/* The response is built in place over the request buffer. */
pHddCtx->ftm.pResponseBuf = (wlan_hdd_ftm_response_t*)pRequestBuf;
/*Save the received request netlink header used for sending the response*/
pHddCtx->ftm.wnl = wnl;
if (pRequestBuf->module_type != QUALCOMM_MODULE_TYPE) {
hddLog(VOS_TRACE_LEVEL_ERROR,"%s: Invalid Module Type =%d\n",__func__,pRequestBuf->module_type);
pHddCtx->ftm.pResponseBuf->ftm_err_code = WLAN_FTM_FAILURE;
wlan_ftm_send_response(pHddCtx);
return ;
}
switch (pRequestBuf->ftmpkt.ftm_cmd_type)
{
case WLAN_FTM_START:
/* Starting twice is reported as success without re-initializing. */
if (pHddCtx->ftm.ftm_state == WLAN_FTM_STARTED) {
hddLog(VOS_TRACE_LEVEL_ERROR,"%s: FTM has already started =%d\n",__func__,pRequestBuf->ftmpkt.ftm_cmd_type);
pHddCtx->ftm.pResponseBuf->ftm_hdr.data_len -= 1;
pHddCtx->ftm.pResponseBuf->ftm_err_code = WLAN_FTM_SUCCESS;
wlan_ftm_send_response(pHddCtx);
return;
}
if (wlan_hdd_ftm_start(pVosContext->pHDDContext) != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_ERROR, "%s: : Failed to start WLAN FTM"
,__func__);
pHddCtx->ftm.pResponseBuf->ftm_err_code = WLAN_FTM_FAILURE;
wlan_ftm_send_response(pHddCtx);
return;
}
/* Ptt application running on the host PC expects the length to be one byte less than what we have received */
pHddCtx->ftm.pResponseBuf->ftm_hdr.data_len -= 1;
pHddCtx->ftm.pResponseBuf->ftm_err_code = WLAN_FTM_SUCCESS;
pHddCtx->ftm.pResponseBuf->ftmpkt.ftm_cmd_type = 0;
wlan_ftm_send_response(pHddCtx);
break;
case WLAN_FTM_STOP:
if (pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED) {
hddLog(VOS_TRACE_LEVEL_ERROR,"%s:: FTM has not started\n",__func__);
pHddCtx->ftm.pResponseBuf->ftm_err_code = WLAN_FTM_SUCCESS;
wlan_ftm_send_response(pHddCtx);
return;
}
if (VOS_STATUS_SUCCESS != wlan_ftm_stop(pHddCtx)) {
pHddCtx->ftm.pResponseBuf->ftm_err_code = WLAN_FTM_FAILURE;
wlan_ftm_send_response(pHddCtx);
return;
}
pHddCtx->ftm.ftm_state = WLAN_FTM_STOPPED;
/* This would send back the Command Success Status */
pHddCtx->ftm.pResponseBuf->ftm_err_code = WLAN_FTM_SUCCESS;
wlan_ftm_send_response(pHddCtx);
break;
case WLAN_FTM_CMD:
/* if it is regular FTM command, pass it to HAL PHY */
if(pHddCtx->ftm.IsCmdPending == TRUE) {
hddLog(VOS_TRACE_LEVEL_ERROR,"%s:: FTM command pending for process\n",__func__);
return;
}
if (pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED) {
hddLog(VOS_TRACE_LEVEL_ERROR,"%s:: FTM has not started\n",__func__);
pHddCtx->ftm.pResponseBuf->ftm_err_code = WLAN_FTM_FAILURE;
wlan_ftm_send_response(pHddCtx);
return;
}
vos_event_reset(&pHddCtx->ftm.ftm_vos_event);
/* Payload length = data_len minus the request framing, keeping the
 * ftm_cmd_type byte which is part of the PTT message itself. */
cmd_len = pRequestBuf->ftm_hdr.data_len;
cmd_len -= (sizeof(wlan_hdd_ftm_request_t)- sizeof(pRequestBuf->ftmpkt.ftm_cmd_type));
pftm_data = pRequestBuf->ftmpkt.pFtmCmd;
/* Try the host-side handlers first; 0 = fully handled on host,
 * 1 = must be forwarded to HAL, <0 = handler failed. */
hostState = wlan_hdd_process_ftm_host_cmd(pHddCtx, pftm_data);
if (0 == hostState)
{
/* NOTE(review): msgBodyLength originates from the remote PTT
 * application and is used unvalidated both for this allocation and
 * for the memcpy into pResponseBuf->ftmpkt below -- confirm an
 * upstream bounds check exists, otherwise this can overflow the
 * response buffer. */
tempRspBuffer = (tPttMsgbuffer *)vos_mem_malloc(((tPttMsgbuffer *)pftm_data)->msgBodyLength);
if (NULL == tempRspBuffer)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s:: temp Mem Alloc Fail\n",__func__);
pHddCtx->ftm.pResponseBuf->ftm_err_code = WLAN_FTM_FAILURE;
wlan_ftm_send_response(pHddCtx);
return;
}
memcpy(tempRspBuffer, pftm_data, ((tPttMsgbuffer *)pftm_data)->msgBodyLength);
tempRspBuffer->msgResponse = PTT_STATUS_SUCCESS;
memcpy((unsigned char *)&pHddCtx->ftm.pResponseBuf->ftmpkt,
(unsigned char *) tempRspBuffer,
tempRspBuffer->msgBodyLength);
pHddCtx->ftm.pResponseBuf->ftm_err_code = WLAN_FTM_SUCCESS;
wlan_ftm_send_response(pHddCtx);
vos_mem_free(tempRspBuffer);
return;
}
else if (0 > hostState)
{
hddLog(VOS_TRACE_LEVEL_ERROR, "*** Host Command Handle Fail ***");
pHddCtx->ftm.pResponseBuf->ftm_err_code = WLAN_FTM_FAILURE;
wlan_ftm_send_response(pHddCtx);
return;
}
//HEXDUMP("Request:",(char*)pftm_data,cmd_len);
pHddCtx->ftm.IsCmdPending = TRUE;
/*Post the command to the HAL*/
if (wlan_ftm_postmsg(pftm_data, cmd_len) != VOS_STATUS_SUCCESS) {
hddLog(VOS_TRACE_LEVEL_ERROR,"%s:: FTM command failed\n",__func__);
return;
}
/*Wait here until you get the response from HAL*/
if (vos_wait_single_event(&pHddCtx->ftm.ftm_vos_event, FTM_VOS_EVENT_WAIT_TIME)!= VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: vos_wait_single_event failed",__func__);
return;
}
/* HAL's reply was written back into the saved netlink buffer. */
cmd_len = be16_to_cpu(pHddCtx->ftm.wnl->wmsg.length);
//HEXDUMP("Response to QXDM:", (char *)&pAdapter->ftm.wnl->wmsg, cmd_len);
wlan_ftm_send_response(pHddCtx);
pHddCtx->ftm.IsCmdPending = FALSE;
break;
default:
hddLog(VOS_TRACE_LEVEL_ERROR,"%s:: Command not supported \n",__func__);
return;
}
EXIT();
return;
} /* wlan_adp_ftm_cmd() */
/**---------------------------------------------------------------------------
\brief wlan_ftm_priv_start_stop_ftm() -
This function is used for start/stop the ftm driver.
\param - pAdapter - Pointer HDD Context.
- start - 1/0 to start/stop ftm driver.
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
/*
 * Start (start != 0) or stop (start == 0) the FTM driver on behalf of the
 * iwpriv interface.  Returns VOS_STATUS_SUCCESS on success and
 * VOS_STATUS_E_FAILURE otherwise.
 */
static VOS_STATUS wlan_ftm_priv_start_stop_ftm(hdd_adapter_t *pAdapter,
                                               v_U16_t start)
{
    hdd_context_t *pHddCtx = (hdd_context_t *)pAdapter->pHddCtx;
    VOS_STATUS vosStatus;

    if (!start)
    {
        /* Stop path. */
        vosStatus = wlan_ftm_stop(pHddCtx);
        if (VOS_STATUS_SUCCESS != vosStatus)
        {
            VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
                      "FTM Stop Failed");
            return VOS_STATUS_E_FAILURE;
        }
        return VOS_STATUS_SUCCESS;
    }

    /* Start path: remember the request came from iwpriv. */
    pHddCtx->ftm.cmd_iwpriv = TRUE;
    vosStatus = wlan_hdd_ftm_start(pHddCtx);
    if (VOS_STATUS_SUCCESS != vosStatus)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
                  "FTM Start Failed");
        return VOS_STATUS_E_FAILURE;
    }
    return VOS_STATUS_SUCCESS;
}
/**---------------------------------------------------------------------------
\brief wlan_ftm_priv_set_channel() -
This function is used for setting the channel to the halphy ptt module.
\param - pAdapter - Pointer HDD Context.
- channel - Channel Number 1-14.
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
/*
 * Set the FTM channel (2.4 GHz channels 1..14 only) via a HALPHY PTT
 * message.  Returns VOS_STATUS_SUCCESS on success, VOS_STATUS_E_FAILURE /
 * VOS_STATUS_E_NOMEM on failure.
 *
 * Fix: the return value of wait_for_completion_interruptible_timeout() was
 * ignored; on timeout or interruption pMsgBuf->msgResponse was read from
 * freshly allocated (uninitialized) memory.  The wait result is now checked.
 */
static VOS_STATUS wlan_ftm_priv_set_channel(hdd_adapter_t *pAdapter,v_U16_t channel)
{
    tPttMsgbuffer *pMsgBuf;
    uPttMsgs *pMsgBody;
    VOS_STATUS status;
    long waitRc;
    hdd_context_t *pHddCtx = (hdd_context_t *)pAdapter->pHddCtx;

    if(pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm. ",__func__);
        return VOS_STATUS_E_FAILURE;
    }
    /* Only 2.4 GHz channels are supported by this path. */
    if(!(channel >= 1 && channel <= 14))
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Invalid Channel Number. ",__func__);
        return VOS_STATUS_E_FAILURE;
    }
    pMsgBuf = (tPttMsgbuffer *)vos_mem_malloc(sizeof(tPttMsgbuffer));
    if(pMsgBuf == NULL)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:pMsgBuf is NULL",__func__);
        return VOS_STATUS_E_NOMEM;
    }
    init_completion(&pHddCtx->ftm.ftm_comp_var);
    pMsgBuf->msgId = PTT_MSG_SET_CHANNEL;
    pMsgBuf->msgBodyLength = sizeof(tMsgPttSetChannel) + PTT_HEADER_LENGTH;
    pMsgBody = &pMsgBuf->msgBody;
    pMsgBody->SetChannel.chId = channel;
    VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Channel =%d\n",pMsgBody->SetChannel.chId);
    pMsgBody->SetChannel.cbState = PHY_SINGLE_CHANNEL_CENTERED;
    status = wlan_ftm_postmsg((v_U8_t*)pMsgBuf,pMsgBuf->msgBodyLength);
    if(status != VOS_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:wlan_ftm_postmsg failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }
    waitRc = wait_for_completion_interruptible_timeout(&pHddCtx->ftm.ftm_comp_var, msecs_to_jiffies(WLAN_FTM_COMMAND_TIME_OUT));
    if (waitRc <= 0)
    {
        /* 0 => timed out, <0 => interrupted: the PTT response never
         * arrived and pMsgBuf->msgResponse is uninitialized memory. */
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ptt response wait failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }
    if(pMsgBuf->msgResponse != PTT_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ptt response status failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }
done:
    vos_mem_free((v_VOID_t * )pMsgBuf);
    return status;
}
/**---------------------------------------------------------------------------
\brief wlan_ftm_priv_set_txpower() -
This function is used for setting the txpower to the halphy ptt module.
\param - pAdapter - Pointer HDD Context.
- txpower - txpower Number 1-18.
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
/*
 * Set the FTM TX power (9..24 dBm) via HALPHY PTT: first close the TPC
 * loop, then program the power in units of 0.01 dBm.  Rejected while the
 * TX packet generator is running.  Returns VOS_STATUS_SUCCESS on success.
 *
 * Fix: both wait_for_completion_interruptible_timeout() results were
 * ignored; on timeout/interruption pMsgBuf->msgResponse was read from
 * uninitialized heap memory.  The wait results are now checked.
 */
static VOS_STATUS wlan_ftm_priv_set_txpower(hdd_adapter_t *pAdapter,v_U16_t txpower)
{
    tPttMsgbuffer *pMsgBuf;
    uPttMsgs *pMsgBody;
    VOS_STATUS status;
    long waitRc;
    hdd_context_t *pHddCtx = (hdd_context_t *)pAdapter->pHddCtx;

    if(pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm. ",__func__);
        return VOS_STATUS_E_FAILURE;
    }
    /* do not allow to change setting when tx pktgen is enabled, although halphy does allow changing tx power
     * when tx pktgen is enabled
     */
    if (ftm_status.frameGenEnabled)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:cannot set txpower when pktgen is enabled.",__func__);
        return VOS_STATUS_E_FAILURE;
    }
    if(!(txpower >= 9 && txpower <= 24))
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Invalid tx power. ",__func__);
        return VOS_STATUS_E_FAILURE;
    }
    pMsgBuf = (tPttMsgbuffer *)vos_mem_malloc(sizeof(tPttMsgbuffer));
    if(pMsgBuf == NULL)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:pMsgBuf is NULL",__func__);
        return VOS_STATUS_E_NOMEM;
    }

    /* Step 1: close the TPC loop so the requested power is applied. */
    init_completion(&pHddCtx->ftm.ftm_comp_var);
    pMsgBuf->msgId = PTT_MSG_CLOSE_TPC_LOOP;
    pMsgBuf->msgBodyLength = sizeof(tMsgPttCloseTpcLoop) + PTT_HEADER_LENGTH;
    pMsgBody = &pMsgBuf->msgBody;
    pMsgBody->CloseTpcLoop.tpcClose = TRUE;
    status = wlan_ftm_postmsg((v_U8_t*)pMsgBuf,pMsgBuf->msgBodyLength);
    if(status != VOS_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:wlan_ftm_postmsg failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }
    waitRc = wait_for_completion_interruptible_timeout(&pHddCtx->ftm.ftm_comp_var, msecs_to_jiffies(WLAN_FTM_COMMAND_TIME_OUT));
    if (waitRc <= 0)
    {
        /* Timed out or interrupted: msgResponse was never written. */
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ptt response wait failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }
    if(pMsgBuf->msgResponse != PTT_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ptt response status failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }

    /* Step 2: program the TX power (firmware expects 0.01 dBm units). */
    init_completion(&pHddCtx->ftm.ftm_comp_var);
    pMsgBuf->msgId = PTT_MSG_SET_TX_POWER;
    pMsgBuf->msgBodyLength = sizeof(tMsgPttSetTxPower) + PTT_HEADER_LENGTH;
    pMsgBody->SetTxPower.dbmPwr = txpower*100;
    status = wlan_ftm_postmsg((v_U8_t*)pMsgBuf,pMsgBuf->msgBodyLength);
    if(status != VOS_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:wlan_ftm_postmsg failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }
    waitRc = wait_for_completion_interruptible_timeout(&pHddCtx->ftm.ftm_comp_var, msecs_to_jiffies(WLAN_FTM_COMMAND_TIME_OUT));
    if (waitRc <= 0)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ptt response wait failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }
    if(pMsgBuf->msgResponse != PTT_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ptt response status failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }
    /* Remember the applied power for later queries. */
    ftm_status.txpower = txpower ;
done:
    vos_mem_free((v_VOID_t * )pMsgBuf);
    return status;
}
/**---------------------------------------------------------------------------
\brief wlan_ftm_priv_set_txrate() -
This function is used for setting the txrate to the halphy ptt module.
It converts the user input string for txrate to the tx rate index.
\param - pAdapter - Pointer HDD Context.
- txrate - Pointer to the tx rate string.
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
/*
 * Translate a user-supplied TX rate string into the corresponding rate
 * index (and its preamble) and cache both in ftm_status.frameParams for the
 * next packet-generator configuration.  Rejected while pktgen is running.
 */
static VOS_STATUS wlan_ftm_priv_set_txrate(hdd_adapter_t *pAdapter,char *txrate)
{
    int idx;
    hdd_context_t *pHddCtx = (hdd_context_t *)pAdapter->pHddCtx;

    if(pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm.",__func__);
        return VOS_STATUS_E_FAILURE;
    }
    /* do not allow to change setting when tx pktgen is enabled */
    if (ftm_status.frameGenEnabled)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:cannot set txrate when pktgen is enabled.",__func__);
        return VOS_STATUS_E_FAILURE;
    }

    /* Linear search of the name -> index table; idx lands one past the end
     * when no entry matches. */
    for (idx = 0; idx < SIZE_OF_TABLE(rateName_rateIndex_tbl); idx++)
    {
        if (strcmp(rateName_rateIndex_tbl[idx].rate_str, txrate) == 0)
        {
            break;
        }
    }
    if (idx >= SIZE_OF_TABLE(rateName_rateIndex_tbl))
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Invalid Rate String\n",__func__);
        return VOS_STATUS_E_FAILURE;
    }

    ftm_status.frameParams.rate = rateName_rateIndex_tbl[idx].rate_index;
    ftm_status.frameParams.preamble = rate_index_2_preamble_table[rateName_rateIndex_tbl[idx].rate_index].Preamble;
    return VOS_STATUS_SUCCESS;
}
/**---------------------------------------------------------------------------
\brief wlan_ftm_priv_start_stop_tx_pktgen() -
This function is used for start/stop the tx packet generation.
\param - pAdapter - Pointer HDD Context.
- startStop - Value( 1/0) start/stop the tx packet generation.
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
/*
 * Start (startStop == 1) or stop (startStop == 0) the TX packet generator.
 * Starting first sends CONFIG_TX_PACKET_GEN with the cached frame
 * parameters, then START_STOP_TX_PACKET_GEN; stopping sends only the
 * latter.  ftm_status.frameGenEnabled is updated only on full success.
 *
 * Fix: both wait_for_completion_interruptible_timeout() results were
 * ignored; on timeout/interruption pMsgBuf->msgResponse was read from
 * uninitialized heap memory.  The wait results are now checked.
 */
static VOS_STATUS wlan_ftm_priv_start_stop_tx_pktgen(hdd_adapter_t *pAdapter,v_U16_t startStop)
{
    tPttMsgbuffer *pMsgBuf;
    uPttMsgs *pMsgBody;
    VOS_STATUS status;
    long waitRc;
    hdd_context_t *pHddCtx = (hdd_context_t *)pAdapter->pHddCtx;

    if(pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm. ",__func__);
        return VOS_STATUS_E_FAILURE;
    }
    if(startStop != 1 && startStop != 0)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Tx value is invalid ",__func__);
        return VOS_STATUS_E_FAILURE;
    }
    /* Already in the requested state: nothing to do. */
    if ((ftm_status.frameGenEnabled && startStop == 1) ||
        (!ftm_status.frameGenEnabled && startStop == 0))
    {
        return VOS_STATUS_SUCCESS ;
    }
    pMsgBuf = (tPttMsgbuffer *)vos_mem_malloc(sizeof(tPttMsgbuffer));
    if(pMsgBuf == NULL)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:pMsgBuf is NULL",__func__);
        return VOS_STATUS_E_NOMEM;
    }
    if (startStop == 1)
    {
        /* Configure the generator with the cached frame parameters
         * before starting it. */
        init_completion(&pHddCtx->ftm.ftm_comp_var);
        pMsgBuf->msgId = PTT_MSG_CONFIG_TX_PACKET_GEN;
        pMsgBuf->msgBodyLength = sizeof(tMsgPttConfigTxPacketGen) + PTT_HEADER_LENGTH;
        pMsgBody = &pMsgBuf->msgBody;
        pMsgBody->ConfigTxPacketGen.frameParams = ftm_status.frameParams ;
        status = wlan_ftm_postmsg((v_U8_t*)pMsgBuf,pMsgBuf->msgBodyLength);
        if(status != VOS_STATUS_SUCCESS)
        {
            VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:posting PTT_MSG_CONFIG_TX_PACKET_GEN failed",__func__);
            status = VOS_STATUS_E_FAILURE;
            goto done;
        }
        waitRc = wait_for_completion_interruptible_timeout(&pHddCtx->ftm.ftm_comp_var, msecs_to_jiffies(WLAN_FTM_COMMAND_TIME_OUT));
        if (waitRc <= 0)
        {
            /* Timed out or interrupted: msgResponse was never written. */
            VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ptt response wait failed",__func__);
            status = VOS_STATUS_E_FAILURE;
            goto done;
        }
        if(pMsgBuf->msgResponse != PTT_STATUS_SUCCESS)
        {
            VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s: PTT_MSG_CONFIG_TX_PACKET_GEN failed",__func__);
            status = VOS_STATUS_E_FAILURE;
            goto done;
        }
    }
    /* Start or stop the generator. */
    init_completion(&pHddCtx->ftm.ftm_comp_var);
    pMsgBuf->msgId = PTT_MSG_START_STOP_TX_PACKET_GEN;
    pMsgBuf->msgBodyLength = sizeof(tMsgPttStartStopTxPacketGen) + PTT_HEADER_LENGTH;
    pMsgBody = &pMsgBuf->msgBody;
    pMsgBody->StartStopTxPacketGen.startStop = startStop;
    status = wlan_ftm_postmsg((v_U8_t*)pMsgBuf,pMsgBuf->msgBodyLength);
    if(status != VOS_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:wlan_ftm_postmsg failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }
    waitRc = wait_for_completion_interruptible_timeout(&pHddCtx->ftm.ftm_comp_var, msecs_to_jiffies(WLAN_FTM_COMMAND_TIME_OUT));
    if (waitRc <= 0)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ptt response wait failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }
    if(pMsgBuf->msgResponse != PTT_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ptt response status failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }
done:
    vos_mem_free((v_VOID_t * )pMsgBuf);
    /* Track the generator state only when everything succeeded. */
    if (status == VOS_STATUS_SUCCESS)
    {
        if (startStop == 1)
        {
            ftm_status.frameGenEnabled = eANI_BOOLEAN_TRUE ;
        }
        else
        {
            ftm_status.frameGenEnabled = eANI_BOOLEAN_FALSE ;
        }
    }
    return status;
}
/**---------------------------------------------------------------------------
\brief wlan_ftm_priv_rx_mode() -
This function is used for start/stop the rx packet generation.
\param - pAdapter - Pointer HDD Context.
- rxmode - 0-disable RX.
- 1-rx ALL frames
- 2-rx 11 g/n frames
- 3-rx 11b frames
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
/*
 * Program the FTM RX disable mask: rxmode 0 disables all RX, 1 enables all,
 * 2 enables only 11g/n, 3 enables only 11b.  The chosen mode is cached in
 * ftm_status.rxmode on success.
 *
 * Fix: the return value of wait_for_completion_interruptible_timeout() was
 * ignored; on timeout/interruption pMsgBuf->msgResponse was read from
 * uninitialized heap memory.  The wait result is now checked.
 */
static VOS_STATUS wlan_ftm_priv_rx_mode(hdd_adapter_t *pAdapter,v_U16_t rxmode)
{
    tPttMsgbuffer *pMsgBuf;
    uPttMsgs *pMsgBody;
    VOS_STATUS status;
    long waitRc;
    hdd_context_t *pHddCtx = (hdd_context_t *)pAdapter->pHddCtx;

    if(pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm. ",__func__);
        return VOS_STATUS_E_FAILURE;
    }
    if(rxmode > 3)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Rx mode value is invalid ",__func__);
        return VOS_STATUS_E_FAILURE;
    }
    pMsgBuf = (tPttMsgbuffer *)vos_mem_malloc(sizeof(tPttMsgbuffer));
    if(pMsgBuf == NULL)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:pMsgBuf is NULL",__func__);
        return VOS_STATUS_E_NOMEM;
    }
    init_completion(&pHddCtx->ftm.ftm_comp_var);
    pMsgBuf->msgId = PTT_MSG_SET_RX_DISABLE_MODE;
    pMsgBuf->msgBodyLength = sizeof(tMsgPttSetRxDisableMode) + PTT_HEADER_LENGTH;
    pMsgBody = &pMsgBuf->msgBody;
    /* Translate the mode into the three per-PHY disable flags
     * (a/g, 11b, and slow-rate packets). */
    switch(rxmode)
    {
        case RXMODE_DISABLE_ALL:
            pMsgBody->SetRxDisableMode.disabled.agPktsDisabled = VOS_TRUE;
            pMsgBody->SetRxDisableMode.disabled.bPktsDisabled = VOS_TRUE;
            pMsgBody->SetRxDisableMode.disabled.slrPktsDisabled= VOS_TRUE;
            break;
        case RXMODE_ENABLE_ALL:
            pMsgBody->SetRxDisableMode.disabled.agPktsDisabled = VOS_FALSE;
            pMsgBody->SetRxDisableMode.disabled.bPktsDisabled = VOS_FALSE;
            pMsgBody->SetRxDisableMode.disabled.slrPktsDisabled= VOS_FALSE;
            break;
        case RXMODE_ENABLE_11GN:
            pMsgBody->SetRxDisableMode.disabled.agPktsDisabled = VOS_FALSE;
            pMsgBody->SetRxDisableMode.disabled.bPktsDisabled = VOS_TRUE;
            pMsgBody->SetRxDisableMode.disabled.slrPktsDisabled= VOS_TRUE;
            break;
        case RXMODE_ENABLE_11B:
            pMsgBody->SetRxDisableMode.disabled.agPktsDisabled = VOS_TRUE;
            pMsgBody->SetRxDisableMode.disabled.bPktsDisabled = VOS_FALSE;
            pMsgBody->SetRxDisableMode.disabled.slrPktsDisabled= VOS_TRUE;
            break;
    }
    status = wlan_ftm_postmsg((v_U8_t*)pMsgBuf,pMsgBuf->msgBodyLength);
    if(status != VOS_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:wlan_ftm_postmsg failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }
    waitRc = wait_for_completion_interruptible_timeout(&pHddCtx->ftm.ftm_comp_var, msecs_to_jiffies(WLAN_FTM_COMMAND_TIME_OUT));
    if (waitRc <= 0)
    {
        /* Timed out or interrupted: msgResponse was never written. */
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ptt response wait failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }
    if(pMsgBuf->msgResponse != PTT_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ptt response status failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }
    /* Cache the applied mode for later queries. */
    ftm_status.rxmode = rxmode ;
done:
    vos_mem_free((v_VOID_t * )pMsgBuf);
    return status;
}
/**---------------------------------------------------------------------------
\brief wlan_ftm_priv_rx_pkt_clear() -
This function sets the rx pkt count to zero.
\param - pAdapter - Pointer HDD Context.
- rx_pkt_clear - rx_pkt_clear value.
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
/*
 * Reset the RX packet statistics counters in the PTT module.  Only
 * rx_pkt_clear == 1 is accepted.  Returns VOS_STATUS_SUCCESS on success.
 *
 * Fix: the return value of wait_for_completion_interruptible_timeout() was
 * ignored; on timeout/interruption pMsgBuf->msgResponse was read from
 * uninitialized heap memory.  The wait result is now checked.
 */
static VOS_STATUS wlan_ftm_priv_rx_pkt_clear(hdd_adapter_t *pAdapter,v_U16_t rx_pkt_clear)
{
    tPttMsgbuffer *pMsgBuf;
    uPttMsgs *pMsgBody;
    VOS_STATUS status;
    long waitRc;
    hdd_context_t *pHddCtx = (hdd_context_t *)pAdapter->pHddCtx;

    if(pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm. ",__func__);
        return VOS_STATUS_E_FAILURE;
    }
    if(rx_pkt_clear != 1)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Invalid rx_pkt_clear value ",__func__);
        return VOS_STATUS_E_FAILURE;
    }
    pMsgBuf = (tPttMsgbuffer *)vos_mem_malloc(sizeof(tPttMsgbuffer));
    if(pMsgBuf == NULL)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:pMsgBuf is NULL",__func__);
        return VOS_STATUS_E_NOMEM;
    }
    init_completion(&pHddCtx->ftm.ftm_comp_var);
    pMsgBuf->msgId = PTT_MSG_RESET_RX_PACKET_STATISTICS;
    pMsgBuf->msgBodyLength = sizeof(tMsgPttResetRxPacketStatistics) + PTT_HEADER_LENGTH;
    pMsgBody = &pMsgBuf->msgBody;
    pMsgBody->ResetRxPacketStatistics.notUsed= rx_pkt_clear;
    status = wlan_ftm_postmsg((v_U8_t*)pMsgBuf,pMsgBuf->msgBodyLength);
    if(status != VOS_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:wlan_ftm_postmsg failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }
    waitRc = wait_for_completion_interruptible_timeout(&pHddCtx->ftm.ftm_comp_var, msecs_to_jiffies(WLAN_FTM_COMMAND_TIME_OUT));
    if (waitRc <= 0)
    {
        /* Timed out or interrupted: msgResponse was never written. */
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ptt response wait failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }
    if(pMsgBuf->msgResponse != PTT_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ptt response status failed",__func__);
        status = VOS_STATUS_E_FAILURE;
        goto done;
    }
done:
    vos_mem_free((v_VOID_t * )pMsgBuf);
    return status;
}
/**---------------------------------------------------------------------------
\brief wlan_ftm_priv_get_channel() -
This function gets the channel number from the halphy ptt module and
returns the channel number to the application.
\param - pAdapter - Pointer HDD Context.
- pChannel - Pointer to get the Channel number.
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
/* Read the current channel: fetch QWLAN_AGC_CHANNEL_FREQ_REG from the
 * firmware via PTT_MSG_DBG_READ_REGISTER, then map the frequency back to a
 * channel number through freq_chan_tbl.  *pChannel is only written on
 * success. */
static VOS_STATUS wlan_ftm_priv_get_channel(hdd_adapter_t *pAdapter,v_U16_t *pChannel)
{
tPttMsgbuffer *pMsgBuf;
uPttMsgs *pMsgBody;
VOS_STATUS status;
v_U16_t freq;
v_U8_t indx=0;
hdd_context_t *pHddCtx = (hdd_context_t *)pAdapter->pHddCtx;
if(pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm. ",__func__);
return VOS_STATUS_E_FAILURE;
}
pMsgBuf = (tPttMsgbuffer *)vos_mem_malloc(sizeof(tPttMsgbuffer));
if(pMsgBuf == NULL)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:pMsgBuf is NULL",__func__);
return VOS_STATUS_E_NOMEM;
}
init_completion(&pHddCtx->ftm.ftm_comp_var);
pMsgBuf->msgId = PTT_MSG_DBG_READ_REGISTER;
pMsgBuf->msgBodyLength = sizeof(tMsgPttDbgReadRegister) + PTT_HEADER_LENGTH;
pMsgBody = &pMsgBuf->msgBody;
pMsgBody->DbgReadRegister.regAddr = QWLAN_AGC_CHANNEL_FREQ_REG;
status = wlan_ftm_postmsg((v_U8_t*)pMsgBuf,pMsgBuf->msgBodyLength);
if(status != VOS_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:wlan_ftm_postmsg failed",__func__);
status = VOS_STATUS_E_FAILURE;
goto done;
}
/* NOTE(review): timeout return ignored; msgResponse may be stale on
 * timeout -- confirm this is acceptable. */
wait_for_completion_interruptible_timeout(&pHddCtx->ftm.ftm_comp_var, msecs_to_jiffies(WLAN_FTM_COMMAND_TIME_OUT));
if(pMsgBuf->msgResponse != PTT_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ptt response status failed",__func__);
status = VOS_STATUS_E_FAILURE;
goto done;
}
/* Mask off the frequency bits and linearly search the freq -> channel
 * mapping table. */
freq = ((v_U16_t)pMsgBody->DbgReadRegister.regValue & QWLAN_AGC_CHANNEL_FREQ_FREQ_MASK);
while ((indx < SIZE_OF_TABLE(freq_chan_tbl)) && (freq != freq_chan_tbl[indx].freq))
indx++;
if (indx >= SIZE_OF_TABLE(freq_chan_tbl))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Invalid Frequency!!!",__func__);
status = VOS_STATUS_E_FAILURE;
goto done;
}
*pChannel = freq_chan_tbl[indx].chan;
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Channel = %d freq = %d\n",*pChannel, freq);
done:
vos_mem_free((v_VOID_t * )pMsgBuf);
return status;
}
/**---------------------------------------------------------------------------
\brief wlan_ftm_priv_get_txpower() -
This function gets the TX power from the halphy ptt module and
returns the TX power to the application.
\param - pAdapter - Pointer HDD Context.
- pTxPwr - Pointer to get the Tx power.
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
/* Query the current TX power from the firmware via
 * PTT_MSG_GET_TX_POWER_REPORT and convert the power-template index into a
 * power value written to *pTxPwr (only on success). */
static VOS_STATUS wlan_ftm_priv_get_txpower(hdd_adapter_t *pAdapter,v_U16_t *pTxPwr)
{
tPttMsgbuffer *pMsgBuf;
uPttMsgs *pMsgBody;
VOS_STATUS status;
hdd_context_t *pHddCtx = (hdd_context_t *)pAdapter->pHddCtx;
if(pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm. ",__func__);
return VOS_STATUS_E_FAILURE;
}
pMsgBuf = (tPttMsgbuffer *)vos_mem_malloc(sizeof(tPttMsgbuffer));
if(pMsgBuf == NULL)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:pMsgBuf is NULL",__func__);
return VOS_STATUS_E_NOMEM;
}
init_completion(&pHddCtx->ftm.ftm_comp_var);
pMsgBuf->msgId = PTT_MSG_GET_TX_POWER_REPORT;
pMsgBuf->msgBodyLength = sizeof(tMsgPttGetTxPowerReport) + PTT_HEADER_LENGTH;
pMsgBody = &pMsgBuf->msgBody;
status = wlan_ftm_postmsg((v_U8_t*)pMsgBuf,pMsgBuf->msgBodyLength);
if(status != VOS_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:wlan_ftm_postmsg failed",__func__);
status = VOS_STATUS_E_FAILURE;
goto done;
}
wait_for_completion_interruptible_timeout(&pHddCtx->ftm.ftm_comp_var, msecs_to_jiffies(WLAN_FTM_COMMAND_TIME_OUT));
if(pMsgBuf->msgResponse != PTT_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s: PTT_MSG_GET_TX_POWER_REPORT failed",__func__);
status = VOS_STATUS_E_FAILURE;
goto done;
}
/* Convert the 5-bit power template index to a power value.  NOTE(review):
 * the (idx + 4) * 50 / 100 scaling presumably yields dBm -- confirm
 * against the PTT module documentation. */
*pTxPwr = ((((pMsgBody->GetTxPowerReport.pwrTemplateIndex & 0x1F) + 4)*50)/100);
done:
vos_mem_free((v_VOID_t * )pMsgBuf);
return status;
}
/**---------------------------------------------------------------------------
\brief wlan_ftm_priv_get_ftm_version() -
This function gets ftm driver and firmware version.
\param - pAdapter - Pointer HDD Context.
- pftmVer - Pointer to receive the FTM version string.
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
/* Build the FTM version string "<chip>_<revid>-<driver version>-" into
 * pftmVer (at most WE_FTM_MAX_STR_LEN bytes).  The chip revision is read
 * from QWLAN_RFAPB_REV_ID_REG via PTT_MSG_DBG_READ_REGISTER, then the
 * firmware build/release query is issued.  Returns VOS_STATUS_SUCCESS on
 * success. */
VOS_STATUS wlan_ftm_priv_get_ftm_version(hdd_adapter_t *pAdapter,char *pftmVer)
{
tPttMsgbuffer *pMsgBuf;
uPttMsgs *pMsgBody;
VOS_STATUS status;
v_U32_t reg_val;
char *buf = pftmVer;
hdd_context_t *pHddCtx = (hdd_context_t *)pAdapter->pHddCtx;
int lenRes = 0;
/* Remaining space in the output buffer; shrunk after each snprintf. */
int lenBuf = WE_FTM_MAX_STR_LEN;
if(pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm. ",__func__);
return VOS_STATUS_E_FAILURE;
}
pMsgBuf = (tPttMsgbuffer *)vos_mem_malloc(sizeof(tPttMsgbuffer));
if(pMsgBuf == NULL)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:pMsgBuf is NULL",__func__);
return VOS_STATUS_E_NOMEM;
}
/* First message: read the chip revision register. */
init_completion(&pHddCtx->ftm.ftm_comp_var);
pMsgBuf->msgId = PTT_MSG_DBG_READ_REGISTER;
pMsgBuf->msgBodyLength = sizeof(tMsgPttDbgReadRegister) + PTT_HEADER_LENGTH;
pMsgBody = &pMsgBuf->msgBody;
pMsgBody->DbgReadRegister.regAddr = QWLAN_RFAPB_REV_ID_REG;
status = wlan_ftm_postmsg((v_U8_t*)pMsgBuf,pMsgBuf->msgBodyLength);
if(status != VOS_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:wlan_ftm_postmsg failed",__func__);
status = VOS_STATUS_E_FAILURE;
goto done;
}
wait_for_completion_interruptible_timeout(&pHddCtx->ftm.ftm_comp_var, msecs_to_jiffies(WLAN_FTM_COMMAND_TIME_OUT));
if(pMsgBuf->msgResponse != PTT_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ptt response status failed",__func__);
status = VOS_STATUS_E_FAILURE;
goto done;
}
/* NOTE(review): the cast truncates regValue to its low 16 bits; only the
 * low two bytes are formatted below, so this appears intentional. */
reg_val = (v_U16_t)pMsgBody->DbgReadRegister.regValue;
/* Second message: firmware build/release number.  NOTE(review): its
 * response body is never read and msgResponse is not checked here. */
init_completion(&pHddCtx->ftm.ftm_comp_var);
pMsgBuf->msgId = PTT_MSG_GET_BUILD_RELEASE_NUMBER;
pMsgBuf->msgBodyLength = sizeof(tMsgPttGetBuildReleaseNumber) + PTT_HEADER_LENGTH;
pMsgBody = &pMsgBuf->msgBody;
status = wlan_ftm_postmsg((v_U8_t*)pMsgBuf,pMsgBuf->msgBodyLength);
if(status != VOS_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:wlan_ftm_postmsg failed",__func__);
status = VOS_STATUS_E_FAILURE;
goto done;
}
wait_for_completion_interruptible_timeout(&pHddCtx->ftm.ftm_comp_var, msecs_to_jiffies(WLAN_FTM_COMMAND_TIME_OUT));
/* Assemble the version string; every snprintf is bounds- and
 * truncation-checked. */
lenRes = snprintf(buf, lenBuf, "%s_",WLAN_CHIP_VERSION);
if(lenRes < 0 || lenRes >= lenBuf)
{
status = VOS_STATUS_E_FAILURE;
goto done;
}
buf += lenRes;
lenBuf -= lenRes;
/*Read the RevID*/
lenRes = snprintf(buf, lenBuf, "%x.%x-",(v_U8_t)(reg_val >> 8), (v_U8_t)(reg_val &0x000000FF));
if(lenRes < 0 || lenRes >= lenBuf)
{
status = VOS_STATUS_E_FAILURE;
goto done;
}
buf += lenRes;
lenBuf -= lenRes;
lenRes = snprintf(buf, lenBuf, "%s-", QWLAN_VERSIONSTR);
if(lenRes < 0 || lenRes >= lenBuf)
{
status = VOS_STATUS_E_FAILURE;
goto done;
}
buf += lenRes;
lenBuf -= lenRes;
done:
vos_mem_free((v_VOID_t * )pMsgBuf);
return status;
}
/**---------------------------------------------------------------------------
\brief wlan_ftm_priv_get_txrate() -
This function gets the TX rate from the halphy ptt module and
returns the TX rate to the application.
\param - pAdapter - Pointer HDD Context.
- pTxRate - Pointer to get the Tx rate string.
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
/* Query the current TX rate index via PTT_MSG_GET_TX_POWER_REPORT and copy
 * its human-readable name from rateName_rateIndex_tbl into pTxRate (at most
 * WE_FTM_MAX_STR_LEN bytes).  pTxRate is only written on success. */
static VOS_STATUS wlan_ftm_priv_get_txrate(hdd_adapter_t *pAdapter,char *pTxRate)
{
tPttMsgbuffer *pMsgBuf;
uPttMsgs *pMsgBody;
VOS_STATUS status;
v_U16_t rate_index,ii;
hdd_context_t *pHddCtx = (hdd_context_t *)pAdapter->pHddCtx;
if(pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm. ",__func__);
return VOS_STATUS_E_FAILURE;
}
pMsgBuf = (tPttMsgbuffer *)vos_mem_malloc(sizeof(tPttMsgbuffer));
if(pMsgBuf == NULL)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:pMsgBuf is NULL",__func__);
return VOS_STATUS_E_NOMEM;
}
init_completion(&pHddCtx->ftm.ftm_comp_var);
pMsgBuf->msgId = PTT_MSG_GET_TX_POWER_REPORT;
pMsgBuf->msgBodyLength = sizeof(tMsgPttGetTxPowerReport) + PTT_HEADER_LENGTH;
pMsgBody = &pMsgBuf->msgBody;
status = wlan_ftm_postmsg((v_U8_t*)pMsgBuf,pMsgBuf->msgBodyLength);
if(status != VOS_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:wlan_ftm_postmsg failed",__func__);
status = VOS_STATUS_E_FAILURE;
goto done;
}
wait_for_completion_interruptible_timeout(&pHddCtx->ftm.ftm_comp_var, msecs_to_jiffies(WLAN_FTM_COMMAND_TIME_OUT));
if(pMsgBuf->msgResponse == PTT_STATUS_SUCCESS) {
rate_index = pMsgBody->GetTxPowerReport.rate;
}
else {
/*Return the default rate*/
//rate_index = HAL_PHY_RATE_11A_6_MBPS;
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s: PTT_MSG_GET_TX_POWER_REPORT failed",__func__);
status = VOS_STATUS_E_FAILURE;
goto done;
}
/* Map the numeric rate index to its display string. */
for(ii = 0; ii < SIZE_OF_TABLE(rateName_rateIndex_tbl); ii++) {
if(rateName_rateIndex_tbl[ii].rate_index == rate_index)
break;
}
if(ii >= SIZE_OF_TABLE(rateName_rateIndex_tbl))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Invalid Rate Index\n",__func__);
status = VOS_STATUS_E_FAILURE;
goto done;
}
strlcpy(pTxRate,rateName_rateIndex_tbl[ii].rate_str, WE_FTM_MAX_STR_LEN);
done:
vos_mem_free((v_VOID_t * )pMsgBuf);
return status;
}
/**---------------------------------------------------------------------------
\brief wlan_ftm_priv_get_rx_pkt_count() -
This function gets the rx pkt count from the halphy ptt module and
returns the rx pkt count to the application.
\param - pAdapter - Pointer HDD Context.
- pRxPktCnt - Pointer to get the rx pkt count.
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
/* Fetch the total received-packet counter from the firmware via
 * PTT_MSG_GET_RX_PKT_COUNTS.  *pRxPktCnt is only written on success. */
static VOS_STATUS wlan_ftm_priv_get_rx_pkt_count(hdd_adapter_t *pAdapter,v_U16_t *pRxPktCnt)
{
tPttMsgbuffer *pMsgBuf;
uPttMsgs *pMsgBody;
VOS_STATUS status;
hdd_context_t *pHddCtx = (hdd_context_t *)pAdapter->pHddCtx;
if(pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm. ",__func__);
return VOS_STATUS_E_FAILURE;
}
pMsgBuf = (tPttMsgbuffer *)vos_mem_malloc(sizeof(tPttMsgbuffer));
if(pMsgBuf == NULL)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:pMsgBuf is NULL",__func__);
return VOS_STATUS_E_NOMEM;
}
init_completion(&pHddCtx->ftm.ftm_comp_var);
pMsgBuf->msgId = PTT_MSG_GET_RX_PKT_COUNTS;
pMsgBuf->msgBodyLength = sizeof(tMsgPttGetRxPktCounts) + PTT_HEADER_LENGTH;
pMsgBody = &pMsgBuf->msgBody;
status = wlan_ftm_postmsg((v_U8_t*)pMsgBuf,pMsgBuf->msgBodyLength);
if(status != VOS_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:wlan_ftm_postmsg failed",__func__);
status = VOS_STATUS_E_FAILURE;
goto done;
}
wait_for_completion_interruptible_timeout(&pHddCtx->ftm.ftm_comp_var, msecs_to_jiffies(WLAN_FTM_COMMAND_TIME_OUT));
if(pMsgBuf->msgResponse != PTT_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ptt response status failed",__func__);
status = VOS_STATUS_E_FAILURE;
goto done;
}
/* NOTE(review): totalRxPackets is narrowed into a v_U16_t result --
 * confirm counts above 65535 are not expected here. */
*pRxPktCnt = pMsgBody->GetRxPktCounts.counters.totalRxPackets;
done:
vos_mem_free((v_VOID_t * )pMsgBuf);
return status;
}
/**---------------------------------------------------------------------------
\brief wlan_ftm_priv_get_rx_rssi() -
This function gets the rx rssi from the halphy ptt module and
returns the rx rssi to the application.
\param - pAdapter - Pointer HDD Context.
- buf - Pointer to get rssi of Rx chains.
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
/* Query the per-chain rx RSSI via PTT_MSG_GET_RX_RSSI and format the two
 * chain values as " R0:<v>, R1:<v>" into buf (at most WE_FTM_MAX_STR_LEN
 * bytes). */
static VOS_STATUS wlan_ftm_priv_get_rx_rssi(hdd_adapter_t *pAdapter,char *buf)
{
tPttMsgbuffer *pMsgBuf;
uPttMsgs *pMsgBody;
VOS_STATUS status;
hdd_context_t *pHddCtx = (hdd_context_t *)pAdapter->pHddCtx;
int ret;
if(pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm. ",__func__);
return VOS_STATUS_E_FAILURE;
}
pMsgBuf = (tPttMsgbuffer *)vos_mem_malloc(sizeof(tPttMsgbuffer));
if(pMsgBuf == NULL)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:pMsgBuf is NULL",__func__);
return VOS_STATUS_E_NOMEM;
}
init_completion(&pHddCtx->ftm.ftm_comp_var);
pMsgBuf->msgId = PTT_MSG_GET_RX_RSSI;
pMsgBuf->msgBodyLength = sizeof(tMsgPttGetRxRssi) + PTT_HEADER_LENGTH;
pMsgBody = &pMsgBuf->msgBody;
status = wlan_ftm_postmsg((v_U8_t*)pMsgBuf,pMsgBuf->msgBodyLength);
if(status != VOS_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:wlan_ftm_postmsg failed",__func__);
status = VOS_STATUS_E_FAILURE;
goto done;
}
wait_for_completion_interruptible_timeout(&pHddCtx->ftm.ftm_comp_var, msecs_to_jiffies(WLAN_FTM_COMMAND_TIME_OUT));
if(pMsgBuf->msgResponse != PTT_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ptt response status failed",__func__);
status = VOS_STATUS_E_FAILURE;
goto done;
}
/* Truncation of the formatted string is treated as a failure. */
ret = snprintf(buf, WE_FTM_MAX_STR_LEN, " R0:%d, R1:%d",
pMsgBody->GetRxRssi.rssi.rx[0],
pMsgBody->GetRxRssi.rssi.rx[1]);
if( ret < 0 || ret >= WE_FTM_MAX_STR_LEN )
{
status = VOS_STATUS_E_FAILURE;
}
done:
vos_mem_free((v_VOID_t * )pMsgBuf);
return status;
}
/**---------------------------------------------------------------------------
\brief wlan_ftm_priv_get_mac_address() -
This function gets the mac address from the halphy ptt module and
returns the mac address to the application.
\param - pAdapter - Pointer HDD Context.
- buf - Pointer to get the mac address.
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
/**
 * Format the device MAC address as "xx:xx:xx:xx:xx:xx" into buf.
 *
 * The address is read from NV when the NV field image is valid; in every
 * other case a hard-coded default address is reported.  (Previously a
 * successful validity query with an invalid item left buf unwritten while
 * still returning success, so the caller formatted garbage.)
 *
 * \param pAdapter - HDD adapter.
 * \param buf      - output buffer, at least WE_FTM_MAX_STR_LEN bytes.
 * \return VOS_STATUS_SUCCESS on success, VOS_STATUS_E_FAILURE otherwise.
 */
static VOS_STATUS wlan_ftm_priv_get_mac_address(hdd_adapter_t *pAdapter,char *buf)
{
v_BOOL_t itemIsValid = VOS_FALSE;
/* Default address, used whenever a valid address is not stored in NV. */
v_U8_t macAddr[VOS_MAC_ADDRESS_LEN] = {0, 0x0a, 0xf5, 4,5, 6};
int ret;
hdd_context_t *pHddCtx = (hdd_context_t *)pAdapter->pHddCtx;
if(pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm. ",__func__);
return VOS_STATUS_E_FAILURE;
}
/* Only overwrite the default when the NV field image is marked valid. */
if ((vos_nv_getValidity(VNV_FIELD_IMAGE, &itemIsValid) == VOS_STATUS_SUCCESS) &&
    (itemIsValid == VOS_TRUE))
{
vos_nv_readMacAddress(macAddr);
}
/* buf is now always populated, from NV or from the default. */
ret = snprintf(buf, WE_FTM_MAX_STR_LEN,
"%02x:%02x:%02x:%02x:%02x:%02x",
MAC_ADDR_ARRAY(macAddr));
if( ret < 0 || ret >= WE_FTM_MAX_STR_LEN )
{
return VOS_STATUS_E_FAILURE;
}
return VOS_STATUS_SUCCESS;
}
/**---------------------------------------------------------------------------
\brief wlan_ftm_priv_set_mac_address() -
This function sets the mac address to the halphy ptt module and
sends the netlink message to the ptt socket application which writes
the macaddress to the qcom_wlan_nv.bin file
\param - pAdapter - Pointer HDD Context.
- buf - Pointer to the macaddress.
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
/**
 * Parse a MAC address of the form "XX:XX:XX:XX:XX:XX" from buf, program it
 * into the NV common fields via PTT_MSG_SET_NV_FIELD, then commit the NV
 * fields image with PTT_MSG_STORE_NV_TABLE.
 *
 * Fixes: the function previously returned VOS_STATUS_SUCCESS
 * unconditionally (hiding firmware failures from the ioctl caller) and did
 * not check the sscanf result, so malformed input wrote uninitialized
 * stack bytes to NV.
 *
 * \param pAdapter - HDD adapter.
 * \param buf      - NUL-terminated "XX:XX:XX:XX:XX:XX" string.
 * \return VOS_STATUS_SUCCESS when both firmware messages were posted and
 *         acknowledged, an error status otherwise.
 */
static VOS_STATUS wlan_ftm_priv_set_mac_address(hdd_adapter_t *pAdapter,char *buf)
{
tPttMsgbuffer *pMsgBuf;
uPttMsgs *pMsgBody;
VOS_STATUS status;
int macAddr[VOS_MAC_ADDRESS_LEN];
v_U8_t *pMacAddress;
v_U8_t ii;
hdd_context_t *pHddCtx = (hdd_context_t *)pAdapter->pHddCtx;
if(pHddCtx->ftm.ftm_state != WLAN_FTM_STARTED)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ftm has not started. Please start the ftm. ",__func__);
return VOS_STATUS_E_FAILURE;
}
/*We get the mac address in string format "XX:XX:XX:XX:XX:XX" convert to hex.
  Reject malformed input up front instead of using uninitialized bytes. */
if (sscanf(buf,"%02x:%02x:%02x:%02x:%02x:%02x",&macAddr[0],&macAddr[1],&macAddr[2],&macAddr[3],&macAddr[4],&macAddr[5]) != VOS_MAC_ADDRESS_LEN)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Invalid MacAddress input",__func__);
return VOS_STATUS_E_FAILURE;
}
pMsgBuf = (tPttMsgbuffer *)vos_mem_malloc(sizeof(tPttMsgbuffer));
if(pMsgBuf == NULL)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:pMsgBuf is NULL",__func__);
return VOS_STATUS_E_NOMEM;
}
/* First message: write the MAC into the NV common fields. */
init_completion(&pHddCtx->ftm.ftm_comp_var);
pMsgBuf->msgId = PTT_MSG_SET_NV_FIELD;
pMsgBuf->msgBodyLength = sizeof(tMsgPttSetNvField) + PTT_HEADER_LENGTH;
pMsgBody = &pMsgBuf->msgBody;
pMsgBody->SetNvField.nvField = NV_COMMON_MAC_ADDR;
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "MacAddress = %02x:%02x:%02x:%02x:%02x:%02x",MAC_ADDR_ARRAY(macAddr));
/* Narrow the parsed int values into the byte array the firmware expects. */
pMacAddress = &pMsgBody->SetNvField.fieldData.macAddr[0];
for(ii = 0; ii < VOS_MAC_ADDRESS_LEN; ii++)
pMacAddress[ii] = (v_U8_t)macAddr[ii];
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "pMacAddress = %02x:%02x:%02x:%02x:%02x:%02x",MAC_ADDR_ARRAY(pMacAddress));
status = wlan_ftm_postmsg((v_U8_t*)pMsgBuf,pMsgBuf->msgBodyLength);
if(status != VOS_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:wlan_ftm_postmsg failed!!",__func__);
status = VOS_STATUS_E_FAILURE;
goto done;
}
wait_for_completion_interruptible_timeout(&pHddCtx->ftm.ftm_comp_var, msecs_to_jiffies(WLAN_FTM_COMMAND_TIME_OUT));
if(pMsgBuf->msgResponse != PTT_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Ptt response status failed",__func__);
status = VOS_STATUS_E_FAILURE;
goto done;
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "NV_COMMON_MAC_ADDR Success!!!\n");
/* Second message: persist the updated NV fields image. */
init_completion(&pHddCtx->ftm.ftm_comp_var);
memset( pMsgBuf,0,sizeof(tPttMsgbuffer));
pMsgBuf->msgId = PTT_MSG_STORE_NV_TABLE;
pMsgBuf->msgBodyLength = sizeof(tMsgPttStoreNvTable) + PTT_HEADER_LENGTH;
pMsgBody = &pMsgBuf->msgBody;
pMsgBody->StoreNvTable.nvTable = NV_FIELDS_IMAGE;
status = wlan_ftm_postmsg((v_U8_t*)pMsgBuf,pMsgBuf->msgBodyLength);
if(status != VOS_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:wlan_ftm_postmsg failed!!!!",__func__);
status = VOS_STATUS_E_FAILURE;
goto done;
}
wait_for_completion_interruptible_timeout(&pHddCtx->ftm.ftm_comp_var, msecs_to_jiffies(WLAN_FTM_COMMAND_TIME_OUT));
done:
vos_mem_free((v_VOID_t * )pMsgBuf);
/* Propagate the real outcome instead of always returning success. */
return status;
}
/* set param sub-ioctls */
/* set param sub-ioctls */
/* Dispatch string-valued FTM private ioctls (set MAC address, set TX rate)
 * to the corresponding wlan_ftm_priv_* helper.  Returns 0 on success or
 * -EINVAL on any failure. */
static int iw_ftm_setchar_getnone(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int sub_cmd = wrqu->data.flags;
int ret = 0; /* success */
VOS_STATUS status;
hdd_adapter_t *pAdapter = (netdev_priv(dev));
/* NOTE(review): wrqu->data.pointer originates in userspace; confirm the
 * wext core (or a copy step before this handler) has copied it into
 * kernel space -- dereferencing a raw user pointer here would be a
 * security bug. */
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "%s: Received length %d", __func__, wrqu->data.length);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "%s: Received data %s", __func__, (char*)wrqu->data.pointer);
switch(sub_cmd)
{
case WE_SET_MAC_ADDRESS:
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "SET MAC ADDRESS\n");
status = wlan_ftm_priv_set_mac_address(pAdapter,(char*)wrqu->data.pointer);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL,"wlan_ftm_priv_set_mac_address Failed =%d\n",status);
ret = -EINVAL;
}
}
break;
case WE_SET_TX_RATE:
{
status = wlan_ftm_priv_set_txrate(pAdapter,(char*)wrqu->data.pointer);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL,"wlan_ftm_priv_set_txrate Failed =%d\n",status);
ret = -EINVAL;
}
break;
}
default:
{
hddLog(LOGE, "%s: Invalid sub command %d\n",__func__, sub_cmd);
ret = -EINVAL;
break;
}
}
return ret;
}
/* Dispatch integer-valued FTM private ioctls to the corresponding
 * wlan_ftm_priv_* helper.  value[0] carries the sub-command, value[1] the
 * value to set.  Returns 0 on success or -EINVAL on a helper failure.
 * NOTE(review): the default (unknown sub-command) branch only logs and
 * leaves ret == 0, unlike the other FTM ioctl handlers which return
 * -EINVAL -- confirm whether that is intentional. */
static int iw_ftm_setint_getnone(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = (netdev_priv(dev));
int *value = (int *)extra;
int sub_cmd = value[0];
int set_value = value[1];
int ret = 0; /* success */
VOS_STATUS status;
switch(sub_cmd)
{
case WE_FTM_ON_OFF:
{
status = wlan_ftm_priv_start_stop_ftm(pAdapter,set_value);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL,"%s Failed =%d\n",__func__, status);
ret = -EINVAL;
}
break;
}
case WE_TX_PKT_GEN:
status = wlan_ftm_priv_start_stop_tx_pktgen(pAdapter,set_value);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL,"wlan_ftm_priv_start_stop_tx_pktgen Failed =%d\n",status);
ret = -EINVAL;
}
break;
case WE_SET_TX_IFS:
status = wlan_ftm_priv_set_txifs(pAdapter,set_value);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL,"wlan_ftm_priv_set_txifs Failed =%d\n",status);
ret = -EINVAL;
}
break;
case WE_SET_TX_PKT_CNT:
status = wlan_ftm_priv_set_txpktcnt(pAdapter,set_value);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL,"wlan_ftm_priv_set_txpktcnt Failed =%d\n",status);
ret = -EINVAL;
}
break;
case WE_SET_TX_PKT_LEN:
status = wlan_ftm_priv_set_txpktlen(pAdapter,set_value);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL,"wlan_ftm_priv_set_txpktlen Failed =%d\n",status);
ret = -EINVAL;
}
break;
case WE_SET_CHANNEL:
{
status = wlan_ftm_priv_set_channel(pAdapter,set_value);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL,"wlan_ftm_priv_set_channel Failed =%d\n",status);
ret = -EINVAL;
}
break;
}
case WE_SET_TX_POWER:
{
status = wlan_ftm_priv_set_txpower(pAdapter,set_value);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL,"wlan_ftm_priv_set_txpower Failed =%d\n",status);
ret = -EINVAL;
}
break;
}
case WE_CLEAR_RX_PKT_CNT:
{
status = wlan_ftm_priv_rx_pkt_clear(pAdapter,set_value);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL,"wlan_ftm_priv_rx_pkt_clear Failed =%d\n",status);
ret = -EINVAL;
}
break;
}
case WE_RX:
{
status = wlan_ftm_priv_rx_mode(pAdapter,set_value);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL,"wlan_ftm_priv_rx_mode Failed =%d\n",status);
ret = -EINVAL;
}
break;
}
case WE_ENABLE_CHAIN:
{
status = wlan_ftm_priv_enable_chain(pAdapter,set_value);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL,"wlan_ftm_priv_enable_chain Failed =%d\n",status);
ret = -EINVAL;
}
break;
}
default:
{
hddLog(LOGE, "Invalid IOCTL setvalue command %d value %d \n",
sub_cmd, set_value);
break;
}
}
return ret;
}
/* get param sub-ioctls */
/* get param sub-ioctls */
/* Dispatch integer-returning FTM private ioctls; the result is written
 * back through value (the same buffer that carried the sub-command).
 * NOTE(review): the helpers write through a v_U16_t* alias of the int
 * result slot, so only the low two bytes are updated and the upper bytes
 * keep the old sub-command value -- confirm userspace masks the result. */
static int iw_ftm_setnone_getint(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = (netdev_priv(dev));
int *value = (int *)extra;
int ret = 0; /* success */
VOS_STATUS status;
switch (value[0])
{
case WE_GET_CHANNEL:
{
status = wlan_ftm_priv_get_channel(pAdapter,(v_U16_t*)value);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL,"wlan_ftm_priv_get_channel Failed =%d\n",status);
ret = -EINVAL;
}
break;
}
case WE_GET_TX_POWER:
{
status = wlan_ftm_priv_get_txpower(pAdapter,(v_U16_t*)value);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL,"wlan_ftm_priv_get_txpower Failed =%d\n",status);
ret = -EINVAL;
}
break;
}
case WE_GET_RX_PKT_CNT:
{
status = wlan_ftm_priv_get_rx_pkt_count(pAdapter,(v_U16_t*)value);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL,"wlan_ftm_priv_get_rx_pkt_count Failed =%d\n",status);
ret = -EINVAL;
}
break;
}
default:
{
hddLog(LOGE, "Invalid IOCTL get_value command %d ",value[0]);
break;
}
}
return ret;
}
/* Dispatch string-returning FTM private ioctls (MAC address, TX rate,
 * version, status, rx RSSI) into the caller-provided extra buffer and set
 * wrqu->data.length to the string length including the terminator.
 * Returns 0 on success or -EINVAL on a helper failure.
 * Fix: the WE_GET_FTM_VERSION failure log previously named
 * wlan_ftm_priv_get_mac_address (copy-paste error). */
static int iw_ftm_get_char_setnone(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int sub_cmd = wrqu->data.flags;
VOS_STATUS status;
hdd_adapter_t *pAdapter = (netdev_priv(dev));
switch(sub_cmd)
{
case WE_GET_MAC_ADDRESS:
{
status = wlan_ftm_priv_get_mac_address(pAdapter, extra);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL, "wlan_ftm_priv_get_mac_address failed =%d\n",status);
return -EINVAL;
}
wrqu->data.length = strlen(extra)+1;
break;
}
case WE_GET_TX_RATE:
{
status = wlan_ftm_priv_get_txrate(pAdapter, extra);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL, "wlan_ftm_priv_get_txrate failed =%d\n",status);
return -EINVAL;
}
wrqu->data.length = strlen(extra)+1;
break;
}
case WE_GET_FTM_VERSION:
{
status = wlan_ftm_priv_get_ftm_version(pAdapter, extra);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL, "wlan_ftm_priv_get_ftm_version failed =%d\n",status);
return -EINVAL;
}
wrqu->data.length = strlen(extra)+1;
break;
}
case WE_GET_FTM_STATUS:
{
status = wlan_ftm_priv_get_status(pAdapter, extra);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL, "wlan_ftm_priv_get_status failed =%d\n",status);
return -EINVAL;
}
wrqu->data.length = strlen(extra)+1;
break;
}
case WE_GET_RX_RSSI:
{
status = wlan_ftm_priv_get_rx_rssi(pAdapter, extra);
if(status != VOS_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_FATAL, "wlan_ftm_priv_get_rx_rssi failed =%d\n",status);
return -EINVAL;
}
wrqu->data.length = strlen(extra)+1;
break;
}
default:
{
hddLog(LOGE, "Invalid IOCTL command %d \n", sub_cmd );
break;
}
}
return 0;
}
/**
 * Wrap pData (data_len bytes) in a tAniHdr plus the EFS command code and
 * forward it to the PTT socket application, which persists it to the NV
 * file.  Compiled to an unconditional success on non-MSM platforms.
 *
 * Fix: the global VOS context and HDD context are now NULL-checked before
 * being dereferenced (previously a missing context caused a kernel oops).
 *
 * \param pData    - payload bytes to forward.
 * \param data_len - payload length in bytes.
 * \return VOS_STATUS_SUCCESS on success, an error status otherwise.
 */
VOS_STATUS wlan_write_to_efs (v_U8_t *pData, v_U16_t data_len)
{
#if defined(MSM_PLATFORM)
tAniHdr *wmsg = NULL;
v_U8_t *pBuf;
hdd_context_t *pHddCtx = NULL;
v_CONTEXT_t pVosContext= NULL;
pBuf = (v_U8_t*)vos_mem_malloc(sizeof(tAniHdr) + sizeof(v_U32_t)+ data_len);
if(pBuf == NULL)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:pBuf is NULL",__func__);
return VOS_STATUS_E_NOMEM;
}
wmsg = (tAniHdr*)pBuf;
wmsg->type = PTT_MSG_FTM_CMDS_TYPE;
wmsg->length = data_len + sizeof(tAniHdr)+ sizeof(v_U32_t);
/* The PTT application expects the length in little-endian order. */
wmsg->length = FTM_SWAP16(wmsg->length);
pBuf += sizeof(tAniHdr);
/*Get the global context */
pVosContext = vos_get_global_context(VOS_MODULE_ID_SYS, NULL);
if (pVosContext == NULL)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Invalid global VOSS context",__func__);
vos_mem_free((v_VOID_t*)wmsg);
return VOS_STATUS_E_FAILURE;
}
/*Get the Hdd Context */
pHddCtx = (hdd_context_t *)(((VosContextType*)(pVosContext))->pHDDContext);
if (pHddCtx == NULL)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:Invalid HDD context",__func__);
vos_mem_free((v_VOID_t*)wmsg);
return VOS_STATUS_E_FAILURE;
}
/* EfS command Code.  NOTE(review): pBuf is only guaranteed to be aligned
 * to the allocator's granularity past tAniHdr; confirm this 32-bit store
 * is alignment-safe on the target CPU. */
*(v_U32_t*)pBuf = 0x000000EF;
pBuf += sizeof(v_U32_t);
memcpy(pBuf, pData,data_len);
/* Route to the iwpriv-registered PID or the netlink requester's PID. */
if(pHddCtx->ftm.cmd_iwpriv == TRUE) {
if( ptt_sock_send_msg_to_app(wmsg, 0, ANI_NL_MSG_PUMAC, pHddCtx->ptt_pid) < 0) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, ("Ptt Socket error sending message to the app!!\n"));
vos_mem_free((v_VOID_t*)wmsg);
return VOS_STATUS_E_FAILURE;
}
}
else {
if( ptt_sock_send_msg_to_app(wmsg, 0, ANI_NL_MSG_PUMAC, pHddCtx->ftm.wnl->nlh.nlmsg_pid) < 0) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, ("Ptt Socket error sending message to the app!!\n"));
vos_mem_free((v_VOID_t*)wmsg);
return VOS_STATUS_E_FAILURE;
}
}
vos_mem_free((v_VOID_t*)wmsg);
#endif /* MSM_PLATFORM */
return VOS_STATUS_SUCCESS;
}
/* action sub-ioctls */
/* action sub-ioctls */
/* Handle parameter-less FTM private ioctls.  WE_SET_NV_DEFAULTS writes the
 * compiled-in nvDefaults image out through wlan_write_to_efs.
 * Fixes: the WE_SET_NV_DEFAULTS case was missing a break and fell through
 * into the "invalid command" default branch; the OOM path returned the
 * positive VOS_STATUS_E_NOMEM from an ioctl handler that must return a
 * negative errno. */
static int iw_ftm_setnone_getnone(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int sub_cmd = wrqu->data.flags;
int ret = 0; /* success */
switch (sub_cmd)
{
case WE_SET_NV_DEFAULTS:
{
v_U8_t *pu8buf,*pTempBuf;
v_U16_t size;
/* Image layout: 4-byte (zero) prefix followed by the sHalNv defaults. */
size = sizeof(v_U32_t) + sizeof(sHalNv);
hddLog(VOS_TRACE_LEVEL_INFO_HIGH,"HAL NV Size =%d\n",size);
pu8buf = vos_mem_malloc(size);
if(pu8buf == NULL)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:pu8buf is NULL",__func__);
return -ENOMEM;
}
memset(pu8buf,0,size);
pTempBuf = pu8buf;
pTempBuf += sizeof(v_U32_t);
memcpy(pTempBuf,&nvDefaults,sizeof(sHalNv));
wlan_write_to_efs(pu8buf,size);
vos_mem_free(pu8buf);
break;
}
default:
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,"%s: unknown ioctl %d", __func__, sub_cmd);
hddLog(LOGE, "Invalid IOCTL action command %d ", sub_cmd);
break;
}
}
return ret;
}
/* Handler table for the FTM private ioctls, indexed by ioctl number
 * relative to SIOCIWFIRSTPRIV; registered via we_ftm_handler_def. */
static const iw_handler we_ftm_private[] = {
[WLAN_FTM_PRIV_SET_INT_GET_NONE - SIOCIWFIRSTPRIV] = iw_ftm_setint_getnone, //set priv ioctl
[WLAN_FTM_PRIV_SET_NONE_GET_INT - SIOCIWFIRSTPRIV] = iw_ftm_setnone_getint, //get priv ioctl
[WLAN_FTM_PRIV_SET_CHAR_GET_NONE - SIOCIWFIRSTPRIV] = iw_ftm_setchar_getnone, //set char priv ioctl
[WLAN_FTM_PRIV_GET_CHAR_SET_NONE - SIOCIWFIRSTPRIV] = iw_ftm_get_char_setnone,
[WLAN_FTM_PRIV_SET_NONE_GET_NONE - SIOCIWFIRSTPRIV] = iw_ftm_setnone_getnone, //action priv ioctl
};
/*Maximum command length can be only 15 */
/* iwpriv argument descriptions: each main ioctl entry (empty name) is
 * followed by its sub-ioctl entries, which declare the command name shown
 * by iwpriv and the set/get argument types. */
static const struct iw_priv_args we_ftm_private_args[] = {
/* handlers for main ioctl */
{ WLAN_FTM_PRIV_SET_INT_GET_NONE,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"" },
{ WE_FTM_ON_OFF,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"ftm" },
{ WE_TX_PKT_GEN,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"tx" },
{ WE_SET_TX_IFS,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"set_txifs" },
{ WE_SET_TX_PKT_CNT,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"set_txpktcnt" },
{ WE_SET_TX_PKT_LEN,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"set_txpktlen" },
{ WE_SET_CHANNEL,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"set_channel" },
{ WE_SET_TX_POWER,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"set_txpower" },
{ WE_CLEAR_RX_PKT_CNT,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"clr_rxpktcnt" },
{ WE_RX,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"rx" },
{ WE_ENABLE_CHAIN,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"ena_chain" },
/* handlers for main ioctl */
{ WLAN_FTM_PRIV_SET_NONE_GET_INT,
0,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"" },
{ WE_GET_CHANNEL,
0,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"get_channel" },
{ WE_GET_TX_POWER,
0,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"get_txpower" },
{ WE_GET_RX_PKT_CNT,
0,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"get_rxpktcnt" },
/* handlers for main ioctl */
{ WLAN_FTM_PRIV_SET_CHAR_GET_NONE,
IW_PRIV_TYPE_CHAR| 512,
0,
"" },
{ WE_SET_MAC_ADDRESS,
IW_PRIV_TYPE_CHAR| 512,
0,
"set_mac_address" },
{ WE_SET_TX_RATE,
IW_PRIV_TYPE_CHAR | 512,
0,
"set_txrate" },
/* handlers for main ioctl */
{ WLAN_FTM_PRIV_GET_CHAR_SET_NONE,
0,
IW_PRIV_TYPE_CHAR| WE_FTM_MAX_STR_LEN,
"" },
{ WE_GET_MAC_ADDRESS,
0,
IW_PRIV_TYPE_CHAR| WE_FTM_MAX_STR_LEN,
"get_mac_address" },
{ WE_GET_FTM_VERSION,
0,
IW_PRIV_TYPE_CHAR| WE_FTM_MAX_STR_LEN,
"ftm_version" },
{ WE_GET_TX_RATE,
0,
IW_PRIV_TYPE_CHAR| WE_FTM_MAX_STR_LEN,
"get_txrate" },
{ WE_GET_FTM_STATUS,
0,
IW_PRIV_TYPE_CHAR| WE_FTM_MAX_STR_LEN,
"get_status" },
{ WE_GET_RX_RSSI,
0,
IW_PRIV_TYPE_CHAR| WE_FTM_MAX_STR_LEN,
"get_rx_rssi" },
/* handlers for main ioctl */
{ WLAN_FTM_PRIV_SET_NONE_GET_NONE,
0,
0,
"" },
/* handlers for sub-ioctl */
{ WE_SET_NV_DEFAULTS,
0,
0,
"set_nv_defaults" },
};
/* Wireless-extensions registration block for FTM mode: private handlers
 * only, no standard handlers and no wireless statistics. */
const struct iw_handler_def we_ftm_handler_def = {
.num_standard = 0,
.num_private = sizeof(we_ftm_private) / sizeof(we_ftm_private[0]),
.num_private_args = sizeof(we_ftm_private_args) / sizeof(we_ftm_private_args[0]),
.standard = (iw_handler *)NULL,
.private = (iw_handler *)we_ftm_private,
.private_args = we_ftm_private_args,
.get_wireless_stats = NULL,
};
/* Install the FTM wireless-extensions handler table on the adapter's net
 * device so the iwpriv FTM commands reach this file's handlers.
 * Always returns 0.  (Dead commented-out wext-state initialization
 * removed.) */
static int wlan_ftm_register_wext(hdd_adapter_t *pAdapter)
{
pAdapter->dev->wireless_handlers = (struct iw_handler_def *)&we_ftm_handler_def;
return 0;
}
/* Firmware-response entry point for FTM.  For iwpriv-originated commands
 * the waiting ioctl thread is woken via the completion variable; for PTT
 * socket commands the response is copied into the shared response buffer
 * and the waiting thread is signalled through ftm_vos_event. */
VOS_STATUS WLANFTM_McProcessMsg (v_VOID_t *message)
{
ftm_rsp_msg_t *pFtmMsgRsp;
VOS_STATUS vos_status = VOS_STATUS_SUCCESS;
hdd_context_t *pHddCtx;
v_CONTEXT_t pVosContext= NULL;
ENTER();
/* Cast before the NULL check is safe (no dereference until after it). */
pFtmMsgRsp = (ftm_rsp_msg_t *)message;
if (!message )
{
VOS_TRACE( VOS_MODULE_ID_SYS, VOS_TRACE_LEVEL_ERROR,
"WLAN FTM:Invalid parameter sent on WLANFTM_ProcessMainMessage");
return VOS_STATUS_E_INVAL;
}
/*Get the global context */
pVosContext = vos_get_global_context(VOS_MODULE_ID_SYS, NULL);
/* NOTE(review): pVosContext/pHddCtx are dereferenced without NULL checks
 * -- confirm this path cannot run before the contexts exist. */
/*Get the Hdd Context */
pHddCtx = ((VosContextType*)(pVosContext))->pHDDContext;
if (pHddCtx->ftm.cmd_iwpriv == TRUE) {
/* iwpriv path: the ioctl thread polls the shared pMsgBuf itself. */
complete(&pHddCtx->ftm.ftm_comp_var);
}
else {
/*Response length to Ptt App*/
pHddCtx->ftm.wnl->wmsg.length = sizeof(tAniHdr)+ SIZE_OF_FTM_DIAG_HEADER_LEN + pFtmMsgRsp->msgBodyLength;
/*Ptt App expects the response length in LE */
pHddCtx->ftm.wnl->wmsg.length = FTM_SWAP16(pHddCtx->ftm.wnl->wmsg.length);
/*Response expects the length to be in */
pHddCtx->ftm.pResponseBuf->ftm_hdr.data_len = pHddCtx->ftm.pRequestBuf->ftm_hdr.data_len -
sizeof(pHddCtx->ftm.pRequestBuf->ftm_hdr.data_len);
/*Copy the message*/
memcpy((char*)&pHddCtx->ftm.pResponseBuf->ftmpkt,(char*)message,pFtmMsgRsp->msgBodyLength);
/*Update the error code*/
pHddCtx->ftm.pResponseBuf->ftm_err_code = WLAN_FTM_SUCCESS;
vos_status = vos_event_set(&pHddCtx->ftm.ftm_vos_event);
if (!VOS_IS_STATUS_SUCCESS(vos_status))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, ("ERROR: HDD vos_event_set failed!!\n"));
return VOS_STATUS_E_FAILURE;
}
}
EXIT();
return VOS_STATUS_SUCCESS;
}
| gpl-2.0 |
guh/linux-imx6-3.14-tune | drivers/base/firmware_class.c | 90 | 39832 | /*
* firmware_class.c - Multi purpose firmware loading support
*
* Copyright (c) 2003 Manuel Estrada Sainz
*
* Please see Documentation/firmware_class/ for more information.
*
*/
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/highmem.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/async.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <generated/utsrelease.h>
#include "base.h"
MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");
/* Builtin firmware support */
#ifdef CONFIG_FW_LOADER
extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];
/*
 * Look up @name among the firmware images linked into the kernel image;
 * on a hit, point @fw at the builtin data and return true.
 */
static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
{
	struct builtin_fw *b;

	for (b = __start_builtin_fw; b != __end_builtin_fw; b++) {
		if (strcmp(name, b->name) != 0)
			continue;
		fw->size = b->size;
		fw->data = b->data;
		return true;
	}

	return false;
}
/* Return true if @fw->data points at an image in the builtin section. */
static bool fw_is_builtin_firmware(const struct firmware *fw)
{
	struct builtin_fw *b;

	for (b = __start_builtin_fw; b != __end_builtin_fw; b++) {
		if (fw->data == b->data)
			return true;
	}

	return false;
}
#else /* Module case - no builtin firmware support */
/* Module build: no builtin firmware section exists, so never match. */
static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
{
	return false;
}
/* Module build: nothing can be builtin firmware. */
static inline bool fw_is_builtin_firmware(const struct firmware *fw)
{
	return false;
}
#endif
/* Bit numbers for firmware_buf.status (used with set_bit/test_bit). */
enum {
	FW_STATUS_LOADING,	/* userspace is writing image data */
	FW_STATUS_DONE,		/* image complete; data is final */
	FW_STATUS_ABORT,	/* load cancelled; waiters get -ENOENT */
};
static int loading_timeout = 60; /* In seconds */

/* Convert the sysfs-tunable timeout to jiffies; <= 0 means wait forever. */
static inline long firmware_loading_timeout(void)
{
	if (loading_timeout <= 0)
		return MAX_SCHEDULE_TIMEOUT;
	return loading_timeout * HZ;
}
/* firmware behavior options (opt_flags bitmask) */
#define FW_OPT_UEVENT	(1U << 0)	/* announce load via uevent; allow caching */
#define FW_OPT_NOWAIT	(1U << 1)	/* asynchronous request (nowait API) */
#ifdef CONFIG_FW_LOADER_USER_HELPER
#define FW_OPT_FALLBACK	(1U << 2)	/* fall back to usermode helper */
#else
#define FW_OPT_FALLBACK	0		/* no helper built in: flag is a no-op */
#endif
/* Global registry of in-flight and cached firmware images. */
struct firmware_cache {
	/* firmware_buf instance will be added into the below list */
	spinlock_t lock;		/* protects head */
	struct list_head head;		/* all live firmware_buf objects */
	int state;			/* FW_LOADER_NO_CACHE / FW_LOADER_START_CACHE */
#ifdef CONFIG_PM_SLEEP
	/*
	 * Names of firmware images which have been cached successfully
	 * will be added into the below list so that device uncache
	 * helper can trace which firmware images have been cached
	 * before.
	 */
	spinlock_t name_lock;		/* protects fw_names */
	struct list_head fw_names;	/* fw_cache_entry nodes */
	struct delayed_work work;	/* deferred uncache after resume */
	struct notifier_block pm_notify; /* suspend/resume hook */
#endif
};
/*
 * One firmware image, refcounted and shared by all concurrent requests
 * for the same image name.
 */
struct firmware_buf {
	struct kref ref;		/* released via __fw_free_buf() */
	struct list_head list;		/* link in fw_cache.head */
	struct completion completion;	/* completed when load finishes/aborts */
	struct firmware_cache *fwc;	/* owning cache (always &fw_cache) */
	unsigned long status;		/* FW_STATUS_* bits */
	void *data;			/* image bytes (vmalloc'd or vmap of pages) */
	size_t size;			/* bytes in data */
#ifdef CONFIG_FW_LOADER_USER_HELPER
	bool is_paged_buf;		/* data is a vmap over pages[] */
	bool need_uevent;		/* load was announced via uevent */
	struct page **pages;		/* page array filled by sysfs writes */
	int nr_pages;			/* pages allocated so far */
	int page_array_size;		/* capacity of pages[] */
	struct list_head pending_list;	/* link in pending_fw_head */
#endif
	char fw_id[];			/* firmware name (flexible array member) */
};
/* Name of one image cached across suspend (node of fw_cache.fw_names). */
struct fw_cache_entry {
	struct list_head list;
	char name[];	/* firmware name (flexible array member) */
};
/* devres payload recording that a device requested a firmware name. */
struct fw_name_devm {
	unsigned long magic;	/* set to &fw_cache to identify our entries */
	char name[];		/* firmware name (flexible array member) */
};
#define to_fwbuf(d) container_of(d, struct firmware_buf, ref)

/* fw_cache.state values: whether request-time caching is active. */
#define	FW_LOADER_NO_CACHE	0
#define	FW_LOADER_START_CACHE	1

static int fw_cache_piggyback_on_request(const char *name);

/* fw_lock could be moved to 'struct firmware_priv' but since it is just
 * guarding for corner cases a global lock should be OK */
static DEFINE_MUTEX(fw_lock);

/* The single global firmware cache instance. */
static struct firmware_cache fw_cache;
/*
 * Allocate a new firmware_buf for @fw_name with an initial reference.
 * GFP_ATOMIC because callers hold fwc->lock.  Returns NULL on OOM;
 * the caller is responsible for linking it into @fwc->head.
 */
static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
					      struct firmware_cache *fwc)
{
	struct firmware_buf *buf;

	buf = kzalloc(sizeof(*buf) + strlen(fw_name) + 1 , GFP_ATOMIC);

	if (!buf)
		return buf;

	kref_init(&buf->ref);
	strcpy(buf->fw_id, fw_name);
	buf->fwc = fwc;
	init_completion(&buf->completion);
#ifdef CONFIG_FW_LOADER_USER_HELPER
	INIT_LIST_HEAD(&buf->pending_list);
#endif

	pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);

	return buf;
}
/*
 * Find the firmware_buf named @fw_name in the global cache, or NULL.
 * Caller must hold fw_cache.lock; no reference is taken.
 */
static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
{
	struct firmware_buf *tmp;
	struct firmware_cache *fwc = &fw_cache;

	list_for_each_entry(tmp, &fwc->head, list)
		if (!strcmp(tmp->fw_id, fw_name))
			return tmp;
	return NULL;
}
/*
 * Get-or-create the firmware_buf for @fw_name.
 *
 * Returns 1 if an existing buf was found (an extra reference is taken),
 * 0 if a fresh buf was allocated and linked into the cache (caller owns
 * the initial reference), or -ENOMEM.  *buf receives the result.
 */
static int fw_lookup_and_allocate_buf(const char *fw_name,
				      struct firmware_cache *fwc,
				      struct firmware_buf **buf)
{
	struct firmware_buf *tmp;

	spin_lock(&fwc->lock);
	tmp = __fw_lookup_buf(fw_name);
	if (tmp) {
		kref_get(&tmp->ref);
		spin_unlock(&fwc->lock);
		*buf = tmp;
		return 1;
	}
	tmp = __allocate_fw_buf(fw_name, fwc);
	if (tmp)
		list_add(&tmp->list, &fwc->head);
	spin_unlock(&fwc->lock);

	*buf = tmp;

	return tmp ? 0 : -ENOMEM;
}
/*
 * kref release callback: unlink @ref's firmware_buf from the cache and
 * free its backing storage.  Called with fwc->lock held (see fw_free_buf)
 * and drops it, as annotated by __releases.
 */
static void __fw_free_buf(struct kref *ref)
	__releases(&fwc->lock)
{
	struct firmware_buf *buf = to_fwbuf(ref);
	struct firmware_cache *fwc = buf->fwc;

	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
		 __func__, buf->fw_id, buf, buf->data,
		 (unsigned int)buf->size);

	list_del(&buf->list);
	spin_unlock(&fwc->lock);

#ifdef CONFIG_FW_LOADER_USER_HELPER
	if (buf->is_paged_buf) {
		int i;
		/* paged buf: data is a vmap over individually allocated pages */
		vunmap(buf->data);
		for (i = 0; i < buf->nr_pages; i++)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
	} else
#endif
		vfree(buf->data);
	kfree(buf);
}
/*
 * Drop one reference on @buf.  The lock is taken here and released either
 * by __fw_free_buf() (last reference) or explicitly below.
 */
static void fw_free_buf(struct firmware_buf *buf)
{
	struct firmware_cache *fwc = buf->fwc;
	spin_lock(&fwc->lock);
	if (!kref_put(&buf->ref, __fw_free_buf))
		spin_unlock(&fwc->lock);
}
/* direct firmware loading support */
static char fw_path_para[256];
static const char * const fw_path[] = {
fw_path_para,
"/lib/firmware/updates/" UTS_RELEASE,
"/lib/firmware/updates",
"/lib/firmware/" UTS_RELEASE,
"/lib/firmware"
};
/*
* Typical usage is that passing 'firmware_class.path=$CUSTOMIZED_PATH'
* from kernel command line because firmware_class is generally built in
* kernel instead of module.
*/
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
/* Don't inline this: 'struct kstat' is biggish */
/* Don't inline this: 'struct kstat' is biggish */
/*
 * Return the size of @file in bytes, or -1 if it cannot be stat'ed, is
 * not a regular file, or its size does not fit in an int.
 */
static noinline_for_stack int fw_file_size(struct file *file)
{
	struct kstat st;
	if (vfs_getattr(&file->f_path, &st))
		return -1;
	if (!S_ISREG(st.mode))
		return -1;
	if (st.size != (int)st.size)
		return -1;
	return st.size;
}
/*
 * Read the whole of @file into a fresh vmalloc buffer and hand ownership
 * to @fw_buf (data/size).  Returns 0, or a negative errno; a short read
 * is treated as -EIO and the buffer is freed.
 */
static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
{
	int size;
	char *buf;
	int rc;

	size = fw_file_size(file);
	if (size <= 0)
		return -EINVAL;
	buf = vmalloc(size);
	if (!buf)
		return -ENOMEM;
	rc = kernel_read(file, 0, buf, size);
	if (rc != size) {
		if (rc > 0)
			rc = -EIO;
		vfree(buf);
		return rc;
	}
	fw_buf->data = buf;
	fw_buf->size = size;
	return 0;
}
static int fw_get_filesystem_firmware(struct device *device,
struct firmware_buf *buf)
{
int i;
int rc = -ENOENT;
char *path = __getname();
for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
struct file *file;
/* skip the unset customized path */
if (!fw_path[i][0])
continue;
snprintf(path, PATH_MAX, "%s/%s", fw_path[i], buf->fw_id);
file = filp_open(path, O_RDONLY, 0);
if (IS_ERR(file))
continue;
rc = fw_read_file_contents(file, buf);
fput(file);
if (rc)
dev_warn(device, "firmware, attempted to load %s, but failed with error %d\n",
path, rc);
else
break;
}
__putname(path);
if (!rc) {
dev_dbg(device, "firmware: direct-loading firmware %s\n",
buf->fw_id);
mutex_lock(&fw_lock);
set_bit(FW_STATUS_DONE, &buf->status);
complete_all(&buf->completion);
mutex_unlock(&fw_lock);
}
return rc;
}
/* firmware holds the ownership of pages */
/* firmware holds the ownership of pages */
/*
 * Release the data behind @fw: a builtin-free vmalloc buffer when it was
 * loaded directly (no priv), otherwise drop the firmware_buf reference.
 */
static void firmware_free_data(const struct firmware *fw)
{
	/* Loaded directly? */
	if (!fw->priv) {
		vfree(fw->data);
		return;
	}
	fw_free_buf(fw->priv);
}
/* store the pages buffer info firmware from buf */
/* store the pages buffer info firmware from buf */
/* Publish @buf's data/size (and page array) into the caller-visible @fw. */
static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
{
	fw->priv = buf;
#ifdef CONFIG_FW_LOADER_USER_HELPER
	fw->pages = buf->pages;
#endif
	fw->size = buf->size;
	fw->data = buf->data;

	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
		 __func__, buf->fw_id, buf, buf->data,
		 (unsigned int)buf->size);
}
#ifdef CONFIG_PM_SLEEP
/*
 * devres release callback for fw_name_devm entries; the name storage is
 * freed by devres itself, so only a debug trace is needed here.
 */
static void fw_name_devm_release(struct device *dev, void *res)
{
	struct fw_name_devm *fwn = res;

	if (fwn->magic == (unsigned long)&fw_cache)
		pr_debug("%s: fw_name-%s devm-%p released\n",
				__func__, fwn->name, res);
}
/*
 * devres match callback: true when @res is one of our magic-tagged
 * firmware-name entries whose name equals @match_data.
 */
static int fw_devm_match(struct device *dev, void *res,
		void *match_data)
{
	struct fw_name_devm *fwn = res;

	if (fwn->magic != (unsigned long)&fw_cache)
		return 0;
	return strcmp(fwn->name, match_data) == 0;
}
/* Find the devres entry recording that @dev already requested @name. */
static struct fw_name_devm *fw_find_devm_name(struct device *dev,
					      const char *name)
{
	return devres_find(dev, fw_name_devm_release,
			   fw_devm_match, (void *)name);
}
/* add firmware name into devres list */
static int fw_add_devm_name(struct device *dev, const char *name)
{
struct fw_name_devm *fwn;
fwn = fw_find_devm_name(dev, name);
if (fwn)
return 1;
fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) +
strlen(name) + 1, GFP_KERNEL);
if (!fwn)
return -ENOMEM;
fwn->magic = (unsigned long)&fw_cache;
strcpy(fwn->name, name);
devres_add(dev, fwn);
return 0;
}
#else
/* !CONFIG_PM_SLEEP: no suspend caching, nothing to record. */
static int fw_add_devm_name(struct device *dev, const char *name)
{
	return 0;
}
#endif
/*
* user-mode helper code
*/
#ifdef CONFIG_FW_LOADER_USER_HELPER
/* Per-request state for a usermode-helper load; embeds the sysfs device. */
struct firmware_priv {
	struct delayed_work timeout_work;	/* aborts the load on timeout */
	bool nowait;				/* request came via the nowait API */
	struct device dev;			/* sysfs node userspace talks to */
	struct firmware_buf *buf;		/* image being filled in */
	struct firmware *fw;			/* caller-visible firmware struct */
};
/* Map the embedded sysfs device back to its firmware_priv. */
static struct firmware_priv *to_firmware_priv(struct device *dev)
{
	return container_of(dev, struct firmware_priv, dev);
}
/*
 * Abort a pending load: unlink @buf from pending_fw_head, set the ABORT
 * bit and wake all waiters.  Caller must hold fw_lock.
 */
static void __fw_load_abort(struct firmware_buf *buf)
{
	/*
	 * There is a small window in which user can write to 'loading'
	 * between loading done and disappearance of 'loading'
	 */
	if (test_bit(FW_STATUS_DONE, &buf->status))
		return;

	list_del_init(&buf->pending_list);
	set_bit(FW_STATUS_ABORT, &buf->status);
	complete_all(&buf->completion);
}
/*
 * Abort @fw_priv's load and detach its buf so later sysfs accesses see
 * -ENODEV instead of touching freed state.  Caller holds fw_lock.
 */
static void fw_load_abort(struct firmware_priv *fw_priv)
{
	struct firmware_buf *buf = fw_priv->buf;

	__fw_load_abort(buf);

	/* avoid user action after loading abort */
	fw_priv->buf = NULL;
}
#define is_fw_load_aborted(buf) \
test_bit(FW_STATUS_ABORT, &(buf)->status)
static LIST_HEAD(pending_fw_head);
/* reboot notifier for avoid deadlock with usermode_lock */
/* reboot notifier for avoid deadlock with usermode_lock */
/* On shutdown/reboot, abort every still-pending usermode-helper load. */
static int fw_shutdown_notify(struct notifier_block *unused1,
			      unsigned long unused2, void *unused3)
{
	mutex_lock(&fw_lock);
	while (!list_empty(&pending_fw_head))
		__fw_load_abort(list_first_entry(&pending_fw_head,
					       struct firmware_buf,
					       pending_list));
	mutex_unlock(&fw_lock);
	return NOTIFY_DONE;
}
/* Registered on the reboot notifier chain at init time. */
static struct notifier_block fw_shutdown_nb = {
	.notifier_call = fw_shutdown_notify,
};
/* sysfs: show the current firmware loading timeout in seconds. */
static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", loading_timeout);
}
/**
* firmware_timeout_store - set number of seconds to wait for firmware
* @class: device class pointer
* @attr: device attribute pointer
* @buf: buffer to scan for timeout value
* @count: number of bytes in @buf
*
* Sets the number of seconds to wait for the firmware. Once
* this expires an error will be returned to the driver and no
* firmware will be provided.
*
* Note: zero means 'wait forever'.
**/
static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
			     const char *buf, size_t count)
{
	loading_timeout = simple_strtol(buf, NULL, 10);
	/* clamp negative input to 0 == wait forever */
	if (loading_timeout < 0)
		loading_timeout = 0;

	return count;
}
/* Class-level sysfs attributes: just the read/write 'timeout' file. */
static struct class_attribute firmware_class_attrs[] = {
	__ATTR_RW(timeout),
	__ATTR_NULL
};
/* Device release: free the firmware_priv that embeds @dev. */
static void fw_dev_release(struct device *dev)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);

	kfree(fw_priv);
}
/*
 * Populate the uevent environment (FIRMWARE, TIMEOUT, ASYNC) consumed by
 * the userspace firmware agent.  Returns -ENOMEM if the env buffer fills.
 */
static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);

	if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
		return -ENOMEM;
	if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
		return -ENOMEM;
	if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
		return -ENOMEM;

	return 0;
}
/* The /sys/class/firmware class used by the usermode-helper fallback. */
static struct class firmware_class = {
	.name		= "firmware",
	.class_attrs	= firmware_class_attrs,
	.dev_uevent	= firmware_uevent,
	.dev_release	= fw_dev_release,
};
/*
 * sysfs 'loading' read: 1 while userspace is writing the image, 0
 * otherwise (also 0 once the buf has been detached by an abort).
 */
static ssize_t firmware_loading_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	int loading = 0;

	mutex_lock(&fw_lock);
	if (fw_priv->buf)
		loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
	mutex_unlock(&fw_lock);

	return sprintf(buf, "%d\n", loading);
}
/* Some architectures don't have PAGE_KERNEL_RO */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
#endif
/* one pages buffer should be mapped/unmapped only once */
/* one pages buffer should be mapped/unmapped only once */
/*
 * Map the page array into a contiguous read-only kernel virtual range and
 * publish it as buf->data.  No-op for non-paged bufs.  Returns 0 or -ENOMEM.
 */
static int fw_map_pages_buf(struct firmware_buf *buf)
{
	if (!buf->is_paged_buf)
		return 0;

	if (buf->data)
		vunmap(buf->data);
	buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
	if (!buf->data)
		return -ENOMEM;
	return 0;
}
/**
* firmware_loading_store - set value in the 'loading' control file
* @dev: device pointer
* @attr: device attribute pointer
* @buf: buffer to scan for loading control value
* @count: number of bytes in @buf
*
* The relevant values are:
*
* 1: Start a load, discarding any previous partial load.
* 0: Conclude the load and hand the data to the driver code.
* -1: Conclude the load with an error and discard any written data.
**/
/*
 * sysfs 'loading' write handler; see the kernel-doc block above for the
 * 1 / 0 / -1 protocol.  All state transitions happen under fw_lock.
 */
static ssize_t firmware_loading_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *fw_buf;
	int loading = simple_strtol(buf, NULL, 10);
	int i;

	mutex_lock(&fw_lock);
	fw_buf = fw_priv->buf;
	if (!fw_buf)
		/* already aborted: nothing to act on */
		goto out;

	switch (loading) {
	case 1:
		/* discarding any previous partial load */
		if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
			for (i = 0; i < fw_buf->nr_pages; i++)
				__free_page(fw_buf->pages[i]);
			kfree(fw_buf->pages);
			fw_buf->pages = NULL;
			fw_buf->page_array_size = 0;
			fw_buf->nr_pages = 0;
			set_bit(FW_STATUS_LOADING, &fw_buf->status);
		}
		break;
	case 0:
		if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
			set_bit(FW_STATUS_DONE, &fw_buf->status);
			clear_bit(FW_STATUS_LOADING, &fw_buf->status);

			/*
			 * Several loading requests may be pending on
			 * one same firmware buf, so let all requests
			 * see the mapped 'buf->data' once the loading
			 * is completed.
			 * */
			fw_map_pages_buf(fw_buf);
			list_del_init(&fw_buf->pending_list);
			complete_all(&fw_buf->completion);
			break;
		}
		/* fallthrough */
	default:
		dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
		/* fallthrough */
	case -1:
		fw_load_abort(fw_priv);
		break;
	}
out:
	mutex_unlock(&fw_lock);
	return count;
}
static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
/*
 * sysfs 'data' read: copy up to @count bytes at @offset out of the
 * partially written page array.  Only valid while loading is still in
 * progress; returns -ENODEV once done/aborted.
 */
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr,
				  char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *buf;
	ssize_t ret_count;

	mutex_lock(&fw_lock);
	buf = fw_priv->buf;
	if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
		ret_count = -ENODEV;
		goto out;
	}
	if (offset > buf->size) {
		ret_count = 0;
		goto out;
	}
	if (count > buf->size - offset)
		count = buf->size - offset;

	ret_count = count;

	/* copy page by page since the pages are not virtually contiguous yet */
	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE-1);
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(buf->pages[page_nr]);

		memcpy(buffer, page_data + page_ofs, page_cnt);

		kunmap(buf->pages[page_nr]);
		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}
out:
	mutex_unlock(&fw_lock);
	return ret_count;
}
/*
 * Grow the page array and allocate pages so the buf can hold at least
 * @min_size bytes.  On allocation failure the load is aborted and
 * -ENOMEM returned.  Caller holds fw_lock.
 */
static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
{
	struct firmware_buf *buf = fw_priv->buf;
	int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT;

	/* If the array of pages is too small, grow it... */
	if (buf->page_array_size < pages_needed) {
		/* double the capacity to amortize repeated small writes */
		int new_array_size = max(pages_needed,
					 buf->page_array_size * 2);
		struct page **new_pages;

		new_pages = kmalloc(new_array_size * sizeof(void *),
				    GFP_KERNEL);
		if (!new_pages) {
			fw_load_abort(fw_priv);
			return -ENOMEM;
		}
		memcpy(new_pages, buf->pages,
		       buf->page_array_size * sizeof(void *));
		memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
		       (new_array_size - buf->page_array_size));
		kfree(buf->pages);
		buf->pages = new_pages;
		buf->page_array_size = new_array_size;
	}

	while (buf->nr_pages < pages_needed) {
		buf->pages[buf->nr_pages] =
			alloc_page(GFP_KERNEL | __GFP_HIGHMEM);

		if (!buf->pages[buf->nr_pages]) {
			fw_load_abort(fw_priv);
			return -ENOMEM;
		}
		buf->nr_pages++;
	}
	return 0;
}
/**
* firmware_data_write - write method for firmware
* @filp: open sysfs file
* @kobj: kobject for the device
* @bin_attr: bin_attr structure
* @buffer: buffer being written
* @offset: buffer offset for write in total data store area
* @count: buffer size
*
* Data written to the 'data' attribute will be later handed to
* the driver as a firmware image.
**/
/*
 * sysfs 'data' write handler; see the kernel-doc block above.  Requires
 * CAP_SYS_RAWIO; grows the page buffer as needed and tracks the high
 * watermark in buf->size.
 */
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr,
				   char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *buf;
	ssize_t retval;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	mutex_lock(&fw_lock);
	buf = fw_priv->buf;
	if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
		retval = -ENODEV;
		goto out;
	}

	retval = fw_realloc_buffer(fw_priv, offset + count);
	if (retval)
		goto out;

	retval = count;

	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE - 1);
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(buf->pages[page_nr]);

		memcpy(page_data + page_ofs, buffer, page_cnt);

		kunmap(buf->pages[page_nr]);
		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}

	/* writes may arrive out of order; keep the furthest extent seen */
	buf->size = max_t(size_t, offset, buf->size);
out:
	mutex_unlock(&fw_lock);
	return retval;
}
/* The binary sysfs 'data' file userspace streams the image through. */
static struct bin_attribute firmware_attr_data = {
	.attr = { .name = "data", .mode = 0644 },
	.size = 0,
	.read = firmware_data_read,
	.write = firmware_data_write,
};
/* Delayed work: abort the load when the userspace helper times out. */
static void firmware_class_timeout_work(struct work_struct *work)
{
	struct firmware_priv *fw_priv = container_of(work,
			struct firmware_priv, timeout_work.work);

	mutex_lock(&fw_lock);
	fw_load_abort(fw_priv);
	mutex_unlock(&fw_lock);
}
/*
 * Allocate and initialize the firmware_priv / sysfs device used for a
 * usermode-helper load of @fw_name on behalf of @device.  The device is
 * initialized but not yet added.  Returns ERR_PTR(-ENOMEM) on failure.
 */
static struct firmware_priv *
fw_create_instance(struct firmware *firmware, const char *fw_name,
		   struct device *device, unsigned int opt_flags)
{
	struct firmware_priv *fw_priv;
	struct device *f_dev;

	fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
	if (!fw_priv) {
		dev_err(device, "%s: kmalloc failed\n", __func__);
		fw_priv = ERR_PTR(-ENOMEM);
		goto exit;
	}

	fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT);
	fw_priv->fw = firmware;
	INIT_DELAYED_WORK(&fw_priv->timeout_work,
		firmware_class_timeout_work);

	f_dev = &fw_priv->dev;

	device_initialize(f_dev);
	dev_set_name(f_dev, "%s", fw_name);
	f_dev->parent = device;
	f_dev->class = &firmware_class;
exit:
	return fw_priv;
}
/* load a firmware via user helper */
/* load a firmware via user helper */
/*
 * Expose the sysfs loading interface, optionally announce it via uevent,
 * then block until userspace finishes, aborts, or the timeout fires.
 * Teardown is in reverse order of setup via the error labels.
 */
static int _request_firmware_load(struct firmware_priv *fw_priv,
				  unsigned int opt_flags, long timeout)
{
	int retval = 0;
	struct device *f_dev = &fw_priv->dev;
	struct firmware_buf *buf = fw_priv->buf;

	/* fall back on userspace loading */
	buf->is_paged_buf = true;

	/* hold the uevent until the attribute files exist */
	dev_set_uevent_suppress(f_dev, true);

	retval = device_add(f_dev);
	if (retval) {
		dev_err(f_dev, "%s: device_register failed\n", __func__);
		goto err_put_dev;
	}

	retval = device_create_bin_file(f_dev, &firmware_attr_data);
	if (retval) {
		dev_err(f_dev, "%s: sysfs_create_bin_file failed\n", __func__);
		goto err_del_dev;
	}

	mutex_lock(&fw_lock);
	list_add(&buf->pending_list, &pending_fw_head);
	mutex_unlock(&fw_lock);

	retval = device_create_file(f_dev, &dev_attr_loading);
	if (retval) {
		mutex_lock(&fw_lock);
		list_del_init(&buf->pending_list);
		mutex_unlock(&fw_lock);
		dev_err(f_dev, "%s: device_create_file failed\n", __func__);
		goto err_del_bin_attr;
	}

	if (opt_flags & FW_OPT_UEVENT) {
		buf->need_uevent = true;
		dev_set_uevent_suppress(f_dev, false);
		dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
		if (timeout != MAX_SCHEDULE_TIMEOUT)
			schedule_delayed_work(&fw_priv->timeout_work, timeout);

		kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
	}

	wait_for_completion(&buf->completion);

	cancel_delayed_work_sync(&fw_priv->timeout_work);

	device_remove_file(f_dev, &dev_attr_loading);
err_del_bin_attr:
	device_remove_bin_file(f_dev, &firmware_attr_data);
err_del_dev:
	device_del(f_dev);
err_put_dev:
	put_device(f_dev);
	return retval;
}
/*
 * Fallback path: create the sysfs instance and run the blocking
 * usermode-helper load for @firmware's already-allocated buf.
 */
static int fw_load_from_user_helper(struct firmware *firmware,
				    const char *name, struct device *device,
				    unsigned int opt_flags, long timeout)
{
	struct firmware_priv *fw_priv;

	fw_priv = fw_create_instance(firmware, name, device, opt_flags);
	if (IS_ERR(fw_priv))
		return PTR_ERR(fw_priv);

	fw_priv->buf = firmware->priv;
	return _request_firmware_load(fw_priv, opt_flags, timeout);
}
#ifdef CONFIG_PM_SLEEP
/* kill pending requests without uevent to avoid blocking suspend */
static void kill_requests_without_uevent(void)
{
struct firmware_buf *buf;
struct firmware_buf *next;
mutex_lock(&fw_lock);
list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) {
if (!buf->need_uevent)
__fw_load_abort(buf);
}
mutex_unlock(&fw_lock);
}
#endif
#else /* CONFIG_FW_LOADER_USER_HELPER */
/* Helper disabled at build time: report the firmware as not found. */
static inline int
fw_load_from_user_helper(struct firmware *firmware, const char *name,
			 struct device *device, unsigned int opt_flags,
			 long timeout)
{
	return -ENOENT;
}
/* No abort during direct loading */
#define is_fw_load_aborted(buf) false
#ifdef CONFIG_PM_SLEEP
static inline void kill_requests_without_uevent(void) { }
#endif
#endif /* CONFIG_FW_LOADER_USER_HELPER */
/* wait until the shared firmware_buf becomes ready (or error) */
/* wait until the shared firmware_buf becomes ready (or error) */
/*
 * Block until @buf (shared with another in-flight request) is DONE or
 * aborted.  Returns 0 when ready, -ENOENT if the load was aborted.
 * The loop re-checks under fw_lock after each wakeup.
 */
static int sync_cached_firmware_buf(struct firmware_buf *buf)
{
	int ret = 0;

	mutex_lock(&fw_lock);
	while (!test_bit(FW_STATUS_DONE, &buf->status)) {
		if (is_fw_load_aborted(buf)) {
			ret = -ENOENT;
			break;
		}
		mutex_unlock(&fw_lock);
		wait_for_completion(&buf->completion);
		mutex_lock(&fw_lock);
	}
	mutex_unlock(&fw_lock);
	return ret;
}
/* prepare firmware and firmware_buf structs;
* return 0 if a firmware is already assigned, 1 if need to load one,
* or a negative error code
*/
/* prepare firmware and firmware_buf structs;
 * return 0 if a firmware is already assigned, 1 if need to load one,
 * or a negative error code
 */
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
			  struct device *device)
{
	struct firmware *firmware;
	struct firmware_buf *buf;
	int ret;

	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
	if (!firmware) {
		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
			__func__);
		return -ENOMEM;
	}

	if (fw_get_builtin_firmware(firmware, name)) {
		dev_dbg(device, "firmware: using built-in firmware %s\n", name);
		return 0; /* assigned */
	}

	ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);

	/*
	 * bind with 'buf' now to avoid warning in failure path
	 * of requesting firmware.
	 */
	firmware->priv = buf;

	if (ret > 0) {
		/* another request owns this buf: wait for it to finish */
		ret = sync_cached_firmware_buf(buf);
		if (!ret) {
			fw_set_page_data(buf, firmware);
			return 0; /* assigned */
		}
	}

	if (ret < 0)
		return ret;
	return 1; /* need to load */
}
/*
 * After a successful load, publish the buf's data into @fw, record the
 * name on @device's devres list for suspend caching, and let an active
 * cache run piggyback on it.  Returns -ENOENT for an empty/aborted buf.
 */
static int assign_firmware_buf(struct firmware *fw, struct device *device,
			       unsigned int opt_flags)
{
	struct firmware_buf *buf = fw->priv;

	mutex_lock(&fw_lock);
	if (!buf->size || is_fw_load_aborted(buf)) {
		mutex_unlock(&fw_lock);
		return -ENOENT;
	}

	/*
	 * add firmware name into devres list so that we can auto cache
	 * and uncache firmware for device.
	 *
	 * device may have been deleted already, but the problem
	 * should be fixed in devres or driver core.
	 */
	/* don't cache firmware handled without uevent */
	if (device && (opt_flags & FW_OPT_UEVENT))
		fw_add_devm_name(device, buf->fw_id);

	/*
	 * After caching firmware image is started, let it piggyback
	 * on request firmware.
	 */
	if (buf->fwc->state == FW_LOADER_START_CACHE) {
		if (fw_cache_piggyback_on_request(buf->fw_id))
			kref_get(&buf->ref);
	}

	/* pass the pages buffer to driver at the last minute */
	fw_set_page_data(buf, fw);
	mutex_unlock(&fw_lock);
	return 0;
}
/* called from request_firmware() and request_firmware_work_func() */
/* called from request_firmware() and request_firmware_work_func() */
/*
 * Core request path: prepare/lookup the buf, take the usermodehelper
 * read lock, try direct filesystem loading, optionally fall back to the
 * user helper, then hand the result to the caller.  On any error the
 * partially built firmware is released and *firmware_p set to NULL.
 */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
		  struct device *device, unsigned int opt_flags)
{
	struct firmware *fw;
	long timeout;
	int ret;

	if (!firmware_p)
		return -EINVAL;

	ret = _request_firmware_prepare(&fw, name, device);
	if (ret <= 0) /* error or already assigned */
		goto out;

	ret = 0;
	timeout = firmware_loading_timeout();
	if (opt_flags & FW_OPT_NOWAIT) {
		timeout = usermodehelper_read_lock_wait(timeout);
		if (!timeout) {
			dev_dbg(device, "firmware: %s loading timed out\n",
				name);
			ret = -EBUSY;
			goto out;
		}
	} else {
		ret = usermodehelper_read_trylock();
		if (WARN_ON(ret)) {
			dev_err(device, "firmware: %s will not be loaded\n",
				name);
			goto out;
		}
	}

	ret = fw_get_filesystem_firmware(device, fw->priv);
	if (ret) {
		if (opt_flags & FW_OPT_FALLBACK) {
			dev_warn(device,
				 "Direct firmware load failed with error %d\n",
				 ret);
			dev_warn(device, "Falling back to user helper\n");
			ret = fw_load_from_user_helper(fw, name, device,
						       opt_flags, timeout);
		}
	}

	if (!ret)
		ret = assign_firmware_buf(fw, device, opt_flags);

	usermodehelper_read_unlock();

out:
	if (ret < 0) {
		release_firmware(fw);
		fw = NULL;
	}

	*firmware_p = fw;
	return ret;
}
/**
* request_firmware: - send firmware request and wait for it
* @firmware_p: pointer to firmware image
* @name: name of firmware file
* @device: device for which firmware is being loaded
*
* @firmware_p will be used to return a firmware image by the name
* of @name for device @device.
*
* Should be called from user context where sleeping is allowed.
*
* @name will be used as $FIRMWARE in the uevent environment and
* should be distinctive enough not to be confused with any other
* firmware image for this or any other device.
*
* Caller must hold the reference count of @device.
*
* The function can be called safely inside device's suspend and
* resume callback.
**/
int
request_firmware(const struct firmware **firmware_p, const char *name,
                 struct device *device)
{
	int ret;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	/* synchronous, uevent-announced, with usermode-helper fallback */
	ret = _request_firmware(firmware_p, name, device,
				FW_OPT_UEVENT | FW_OPT_FALLBACK);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_firmware);
#ifdef CONFIG_FW_LOADER_USER_HELPER
/**
* request_firmware_direct: - load firmware directly without usermode helper
* @firmware_p: pointer to firmware image
* @name: name of firmware file
* @device: device for which firmware is being loaded
*
* This function works pretty much like request_firmware(), but this doesn't
* fall back to usermode helper even if the firmware couldn't be loaded
* directly from fs. Hence it's useful for loading optional firmwares, which
* aren't always present, without extra long timeouts of udev.
**/
int request_firmware_direct(const struct firmware **firmware_p,
			    const char *name, struct device *device)
{
	int ret;

	/* pin the module for the duration of the request */
	__module_get(THIS_MODULE);
	/* no FW_OPT_FALLBACK: filesystem loading only, no helper timeout */
	ret = _request_firmware(firmware_p, name, device, FW_OPT_UEVENT);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL_GPL(request_firmware_direct);
#endif
/**
* release_firmware: - release the resource associated with a firmware image
* @fw: firmware resource to release
**/
void release_firmware(const struct firmware *fw)
{
	if (fw) {
		/* builtin images live in the kernel image; never freed */
		if (!fw_is_builtin_firmware(fw))
			firmware_free_data(fw);
		kfree(fw);
	}
}
EXPORT_SYMBOL(release_firmware);
/* Async support */
/* State for one asynchronous (nowait) firmware request. */
struct firmware_work {
	struct work_struct work;
	struct module *module;		/* pinned while the work runs */
	const char *name;		/* firmware image name */
	struct device *device;		/* ref held until completion */
	void *context;			/* opaque pointer passed to cont() */
	void (*cont)(const struct firmware *fw, void *context);
	unsigned int opt_flags;		/* FW_OPT_* for _request_firmware() */
};
/*
 * Workqueue body for request_firmware_nowait(): perform the request,
 * invoke the caller's continuation, then drop the device/module refs
 * taken at submission time and free the work item.
 */
static void request_firmware_work_func(struct work_struct *work)
{
	struct firmware_work *fw_work;
	const struct firmware *fw;

	fw_work = container_of(work, struct firmware_work, work);

	_request_firmware(&fw, fw_work->name, fw_work->device,
			  fw_work->opt_flags);
	fw_work->cont(fw, fw_work->context);
	put_device(fw_work->device); /* taken in request_firmware_nowait() */

	module_put(fw_work->module);
	kfree(fw_work);
}
/**
* request_firmware_nowait - asynchronous version of request_firmware
* @module: module requesting the firmware
* @uevent: sends uevent to copy the firmware image if this flag
* is non-zero else the firmware copy must be done manually.
* @name: name of firmware file
* @device: device for which firmware is being loaded
* @gfp: allocation flags
* @context: will be passed over to @cont, and
* @fw may be %NULL if firmware request fails.
* @cont: function will be called asynchronously when the firmware
* request is over.
*
* Caller must hold the reference count of @device.
*
* Asynchronous variant of request_firmware() for user contexts:
* - sleep for as small periods as possible since it may
* increase kernel boot time of built-in device drivers
* requesting firmware in their ->probe() methods, if
* @gfp is GFP_KERNEL.
*
* - can't sleep at all if @gfp is GFP_ATOMIC.
**/
int
request_firmware_nowait(
	struct module *module, bool uevent,
	const char *name, struct device *device, gfp_t gfp, void *context,
	void (*cont)(const struct firmware *fw, void *context))
{
	struct firmware_work *fw_work;

	fw_work = kzalloc(sizeof (struct firmware_work), gfp);
	if (!fw_work)
		return -ENOMEM;

	fw_work->module = module;
	fw_work->name = name;
	fw_work->device = device;
	fw_work->context = context;
	fw_work->cont = cont;
	fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK |
		(uevent ? FW_OPT_UEVENT : 0);

	/* pin the requesting module until the work item has run */
	if (!try_module_get(module)) {
		kfree(fw_work);
		return -EFAULT;
	}

	/* device ref dropped in request_firmware_work_func() */
	get_device(fw_work->device);
	INIT_WORK(&fw_work->work, request_firmware_work_func);
	schedule_work(&fw_work->work);
	return 0;
}
EXPORT_SYMBOL(request_firmware_nowait);
#ifdef CONFIG_PM_SLEEP
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
/**
* cache_firmware - cache one firmware image in kernel memory space
* @fw_name: the firmware image name
*
* Cache firmware in kernel memory so that drivers can use it when
* system isn't ready for them to request firmware image from userspace.
* Once it returns successfully, driver can use request_firmware or its
* nowait version to get the cached firmware without any interacting
* with userspace
*
* Return 0 if the firmware image has been cached successfully
* Return !0 otherwise
*
*/
static int cache_firmware(const char *fw_name)
{
	int ret;
	const struct firmware *fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	ret = request_firmware(&fw, fw_name, NULL);
	if (!ret)
		/*
		 * Free only the wrapper struct: the underlying firmware_buf
		 * keeps its reference and stays cached (intentionally not
		 * release_firmware()).
		 */
		kfree(fw);

	pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);

	return ret;
}
static struct firmware_buf *fw_lookup_buf(const char *fw_name)
{
struct firmware_buf *tmp;
struct firmware_cache *fwc = &fw_cache;
spin_lock(&fwc->lock);
tmp = __fw_lookup_buf(fw_name);
spin_unlock(&fwc->lock);
return tmp;
}
/**
* uncache_firmware - remove one cached firmware image
* @fw_name: the firmware image name
*
* Uncache one firmware image which has been cached successfully
* before.
*
* Return 0 if the firmware cache has been removed successfully
* Return !0 otherwise
*
*/
static int uncache_firmware(const char *fw_name)
{
	struct firmware_buf *buf;
	struct firmware fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	/* builtin images are never cached, so nothing to drop */
	if (fw_get_builtin_firmware(&fw, fw_name))
		return 0;

	buf = fw_lookup_buf(fw_name);
	if (buf) {
		/* drop the reference taken when the image was cached */
		fw_free_buf(buf);
		return 0;
	}

	return -EINVAL;
}
/*
 * Allocate a cache-entry node holding a copy of @name.  GFP_ATOMIC
 * because callers may hold spinlocks.  Returns NULL on failure.
 */
static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
{
	struct fw_cache_entry *fce;

	fce = kzalloc(sizeof(*fce) + strlen(name) + 1, GFP_ATOMIC);
	if (fce)
		strcpy(fce->name, name);

	return fce;
}
/*
 * Return 1 if @name is already on fw_cache.fw_names, else 0.
 * Caller must hold fw_cache.name_lock.
 */
static int __fw_entry_found(const char *name)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	list_for_each_entry(fce, &fwc->fw_names, list) {
		if (!strcmp(fce->name, name))
			return 1;
	}
	return 0;
}
/*
 * While a cache run is active, record @name as cached so the in-flight
 * request's buf is retained (caller takes the extra kref).  Returns 1 if
 * a new entry was added, 0 if already present or on allocation failure.
 */
static int fw_cache_piggyback_on_request(const char *name)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;
	int ret = 0;

	spin_lock(&fwc->name_lock);
	if (__fw_entry_found(name))
		goto found;

	fce = alloc_fw_cache_entry(name);
	if (fce) {
		ret = 1;
		list_add(&fce->list, &fwc->fw_names);
		pr_debug("%s: fw: %s\n", __func__, name);
	}
found:
	spin_unlock(&fwc->name_lock);
	return ret;
}
/* Free a cache-entry node (name storage is inline, freed with it). */
static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
	kfree(fce);
}
/*
 * Async worker: cache one image by name; on failure remove its entry
 * from fw_cache.fw_names and free it.
 */
static void __async_dev_cache_fw_image(void *fw_entry,
				       async_cookie_t cookie)
{
	struct fw_cache_entry *fce = fw_entry;
	struct firmware_cache *fwc = &fw_cache;
	int ret;

	ret = cache_firmware(fce->name);
	if (ret) {
		spin_lock(&fwc->name_lock);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		free_fw_cache_entry(fce);
	}
}
/* called with dev->devres_lock held */
static void dev_create_fw_entry(struct device *dev, void *res,
				void *data)
{
	struct fw_name_devm *fwn = res;
	struct list_head *head = data;
	struct fw_cache_entry *fce;

	/* Queue one cache entry per recorded firmware name. */
	fce = alloc_fw_cache_entry(fwn->name);
	if (fce)
		list_add(&fce->list, head);
}
static int devm_name_match(struct device *dev, void *res,
void *match_data)
{
struct fw_name_devm *fwn = res;
return (fwn->magic == (unsigned long)match_data);
}
/*
 * For one device: collect the firmware names recorded in its devres
 * list onto a local list, merge them (deduplicated) into the global
 * name list, and schedule an async caching job for each new name.
 */
static void dev_cache_fw_image(struct device *dev, void *data)
{
	LIST_HEAD(todo);
	struct fw_cache_entry *fce;
	struct fw_cache_entry *fce_next;
	struct firmware_cache *fwc = &fw_cache;

	/* dev_create_fw_entry() adds one entry per fw_name_devm resource. */
	devres_for_each_res(dev, fw_name_devm_release,
			    devm_name_match, &fw_cache,
			    dev_create_fw_entry, &todo);

	list_for_each_entry_safe(fce, fce_next, &todo, list) {
		list_del(&fce->list);

		spin_lock(&fwc->name_lock);
		/* only one cache entry for one firmware */
		if (!__fw_entry_found(fce->name)) {
			list_add(&fce->list, &fwc->fw_names);
		} else {
			/* Duplicate name: drop it, schedule nothing. */
			free_fw_cache_entry(fce);
			fce = NULL;
		}
		spin_unlock(&fwc->name_lock);

		if (fce)
			async_schedule_domain(__async_dev_cache_fw_image,
					      (void *)fce,
					      &fw_cache_domain);
	}
}
/*
 * Drain fw_cache.fw_names, uncaching each image.  name_lock is dropped
 * around uncache_firmware(), which itself takes the cache's buf lock
 * via fw_lookup_buf(); the loop re-checks emptiness after reacquiring.
 */
static void __device_uncache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	while (!list_empty(&fwc->fw_names)) {
		fce = list_entry(fwc->fw_names.next,
				 struct fw_cache_entry, list);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		uncache_firmware(fce->name);
		free_fw_cache_entry(fce);

		spin_lock(&fwc->name_lock);
	}
	spin_unlock(&fwc->name_lock);
}
/**
 * device_cache_fw_images - cache devices' firmware
 *
 * If one device called request_firmware or its nowait version
 * successfully before, the firmware names are recorded into the
 * device's devres link list, so device_cache_fw_images can call
 * cache_firmware() to cache these firmwares for the device,
 * then the device driver can load its firmwares easily at
 * time when system is not ready to complete loading firmware.
 */
static void device_cache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	int old_timeout;

	pr_debug("%s\n", __func__);

	/* cancel uncache work */
	cancel_delayed_work_sync(&fwc->work);

	/*
	 * use small loading timeout for caching devices' firmware
	 * because all these firmware images have been loaded
	 * successfully at least once, also system is ready for
	 * completing firmware loading now. The maximum size of
	 * firmware in current distributions is about 2M bytes,
	 * so 10 secs should be enough.
	 */
	old_timeout = loading_timeout;
	loading_timeout = 10;

	mutex_lock(&fw_lock);
	fwc->state = FW_LOADER_START_CACHE;
	dpm_for_each_dev(NULL, dev_cache_fw_image);
	mutex_unlock(&fw_lock);

	/* wait for completion of caching firmware for all devices */
	async_synchronize_full_domain(&fw_cache_domain);

	loading_timeout = old_timeout;
}
/**
 * device_uncache_fw_images - uncache devices' firmware
 *
 * uncache all firmwares which have been cached successfully
 * by device_cache_fw_images earlier
 */
static void device_uncache_fw_images(void)
{
	pr_debug("%s\n", __func__);
	__device_uncache_fw_images();
}
/* Delayed-work wrapper scheduled by device_uncache_fw_images_delay(). */
static void device_uncache_fw_images_work(struct work_struct *work)
{
	device_uncache_fw_images();
}
/**
 * device_uncache_fw_images_delay - uncache devices' firmware
 * @delay: number of milliseconds to delay uncaching device firmware
 *
 * uncache all devices' firmware which has been cached successfully
 * by device_cache_fw_images after @delay milliseconds.
 */
static void device_uncache_fw_images_delay(unsigned long delay)
{
	schedule_delayed_work(&fw_cache.work,
			      msecs_to_jiffies(delay));
}
/*
 * PM notifier: cache all recorded firmware before entering a sleep
 * state, and schedule uncaching after resume.
 */
static int fw_pm_notify(struct notifier_block *notify_block,
			unsigned long mode, void *unused)
{
	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		/* Abort pending user-helper loads, then cache everything. */
		kill_requests_without_uevent();
		device_cache_fw_images();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		/*
		 * In case that system sleep failed and syscore_suspend is
		 * not called.
		 */
		mutex_lock(&fw_lock);
		fw_cache.state = FW_LOADER_NO_CACHE;
		mutex_unlock(&fw_lock);

		/* Keep the cache for a grace period after resume. */
		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
		break;
	}

	return 0;
}
/* stop caching firmware once syscore_suspend is reached */
static int fw_suspend(void)
{
	fw_cache.state = FW_LOADER_NO_CACHE;
	return 0;
}

static struct syscore_ops fw_syscore_ops = {
	.suspend = fw_suspend,
};
#else
/* Stub when the suspend/resume firmware cache above is configured out. */
static int fw_cache_piggyback_on_request(const char *name)
{
	return 0;
}
#endif
/* Initialise the global firmware cache and its PM hooks (if enabled). */
static void __init fw_cache_init(void)
{
	spin_lock_init(&fw_cache.lock);
	INIT_LIST_HEAD(&fw_cache.head);
	fw_cache.state = FW_LOADER_NO_CACHE;

#ifdef CONFIG_PM_SLEEP
	spin_lock_init(&fw_cache.name_lock);
	INIT_LIST_HEAD(&fw_cache.fw_names);

	INIT_DELAYED_WORK(&fw_cache.work,
			  device_uncache_fw_images_work);

	/* Cache before suspend, uncache after resume. */
	fw_cache.pm_notify.notifier_call = fw_pm_notify;
	register_pm_notifier(&fw_cache.pm_notify);

	register_syscore_ops(&fw_syscore_ops);
#endif
}
/* Module init: set up the cache; user-helper mode also needs the class. */
static int __init firmware_class_init(void)
{
	fw_cache_init();
#ifdef CONFIG_FW_LOADER_USER_HELPER
	register_reboot_notifier(&fw_shutdown_nb);
	return class_register(&firmware_class);
#else
	return 0;
#endif
}
/* Module exit: tear down PM hooks and the user-helper class, if built. */
static void __exit firmware_class_exit(void)
{
#ifdef CONFIG_PM_SLEEP
	unregister_syscore_ops(&fw_syscore_ops);
	unregister_pm_notifier(&fw_cache.pm_notify);
#endif
#ifdef CONFIG_FW_LOADER_USER_HELPER
	unregister_reboot_notifier(&fw_shutdown_nb);
	class_unregister(&firmware_class);
#endif
}

/* fs_initcall: presumably so firmware loading is ready before most
 * driver initcalls run -- TODO confirm ordering requirement. */
fs_initcall(firmware_class_init);
module_exit(firmware_class_exit);
| gpl-2.0 |
roggin/iconia-a500-kernel | drivers/net/ixgbe/ixgbe_dcb_82599.c | 90 | 14067 | /*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"
/**
 * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: Number of elements in bwg_array.
 *
 * Accumulates the per-TC packet and byte counters (both directions)
 * into @stats.  Returns DCB_ERR_PARAM if @tc_count exceeds the limit.
 */
s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
                                 struct ixgbe_hw_stats *stats,
                                 u8 tc_count)
{
	int i;

	if (tc_count > MAX_TRAFFIC_CLASS)
		return DCB_ERR_PARAM;

	/* Statistics pertaining to each traffic class */
	for (i = 0; i < tc_count; i++) {
		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); /* Tx pkts */
		stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); /* Tx bytes */
		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); /* Rx pkts */
		stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); /* Rx bytes */
	}

	return 0;
}
/**
 * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: Number of elements in bwg_array.
 *
 * This function returns the CBFC status data for each of the Traffic Classes.
 */
s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
                                  struct ixgbe_hw_stats *stats,
                                  u8 tc_count)
{
	int tc;

	if (tc_count > MAX_TRAFFIC_CLASS)
		return DCB_ERR_PARAM;

	/* Accumulate per-priority XOFF counters into the stats struct. */
	for (tc = 0; tc < tc_count; tc++) {
		/* Priority XOFF Transmitted */
		stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
		/* Priority XOFF Received */
		stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
	}

	return 0;
}
/**
 * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure packet buffers for DCB mode.  Rx buffers are sized either
 * equally (64KB each) or 4x80KB + 4x48KB; Tx buffers are always equal.
 */
s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
                                          struct ixgbe_dcb_config *dcb_config)
{
	s32 ret_val = 0;
	u32 value = IXGBE_RXPBSIZE_64KB;
	u8 i = 0;

	/* Setup Rx packet buffer sizes */
	switch (dcb_config->rx_pba_cfg) {
	case pba_80_48:
		/* Setup the first four at 80KB */
		value = IXGBE_RXPBSIZE_80KB;
		for (; i < 4; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
		/* Setup the last four at 48KB...don't re-init i */
		value = IXGBE_RXPBSIZE_48KB;
		/* Fall Through -- intentional: shares the loop below,
		 * continuing from i == 4 with the 48KB value. */
	case pba_equal:
	default:
		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);

		/* Setup Tx packet buffer sizes */
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i),
			                IXGBE_TXPBSIZE_20KB);
			IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i),
			                IXGBE_TXPBTHRESH_DCB);
		}
		break;
	}

	return ret_val;
}
/**
 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure Rx Packet Arbiter and credits for each traffic class.
 * Sequence: disable arbiter, program UP-to-TC map and per-TC credits,
 * then re-enable the arbiter.
 */
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
                                      struct ixgbe_dcb_config *dcb_config)
{
	struct tc_bw_alloc *p;
	u32 reg = 0;
	u32 credit_refill = 0;
	u32 credit_max = 0;
	u8 i = 0;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; WSP)
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	/* Map all traffic classes to their UP, 1 to 1 */
	reg = 0;
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
		reg |= (i << (i * IXGBE_RTRUP2TC_UP_SHIFT));
	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
		credit_refill = p->data_credits_refill;
		credit_max = p->data_credits_max;
		reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);

		reg |= (u32)(p->bwg_id) << IXGBE_RTRPT4C_BWG_SHIFT;

		if (p->prio_type == prio_link)
			reg |= IXGBE_RTRPT4C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
	}

	/*
	 * Configure Rx packet plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	return 0;
}
/**
 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
                                           struct ixgbe_dcb_config *dcb_config)
{
	struct tc_bw_alloc *p;
	u32 reg, max_credits;
	u8 i;

	/* Clear the per-Tx queue credits; we use per-TC instead */
	for (i = 0; i < 128; i++) {
		/* RTTDQSEL selects the queue, RTTDT1C writes its credits. */
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
	}

	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
		max_credits = dcb_config->tc_config[i].desc_credits_max;
		reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
		reg |= p->data_credits_refill;
		reg |= (u32)(p->bwg_id) << IXGBE_RTTDT2C_BWG_SHIFT;

		if (p->prio_type == prio_group)
			reg |= IXGBE_RTTDT2C_GSP;

		if (p->prio_type == prio_link)
			reg |= IXGBE_RTTDT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
	}

	/*
	 * Configure Tx descriptor plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	return 0;
}
/**
 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure Tx Packet Arbiter and credits for each traffic class.
 * Sequence mirrors the Rx arbiter: disable, program, re-enable.
 */
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
                                           struct ixgbe_dcb_config *dcb_config)
{
	struct tc_bw_alloc *p;
	u32 reg;
	u8 i;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; SP; arb delay)
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
	      IXGBE_RTTPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	/* Map all traffic classes to their UP, 1 to 1 */
	reg = 0;
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
		reg |= (i << (i * IXGBE_RTTUP2TC_UP_SHIFT));
	IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
		reg = p->data_credits_refill;
		reg |= (u32)(p->data_credits_max) << IXGBE_RTTPT2C_MCL_SHIFT;
		reg |= (u32)(p->bwg_id) << IXGBE_RTTPT2C_BWG_SHIFT;

		if (p->prio_type == prio_group)
			reg |= IXGBE_RTTPT2C_GSP;

		if (p->prio_type == prio_link)
			reg |= IXGBE_RTTPT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
	}

	/*
	 * Configure Tx packet plane (recycle mode; SP; arb delay) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	return 0;
}
/**
 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure Priority Flow Control (PFC) for each traffic class.
 * When PFC is globally disabled, falls back to link flow control.
 */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
                               struct ixgbe_dcb_config *dcb_config)
{
	u32 i, reg, rx_pba_size;

	/* If PFC is disabled globally then fall back to LFC. */
	if (!dcb_config->pfc_mode_enable) {
		for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
			hw->mac.ops.fc_enable(hw, i);
		goto out;
	}

	/* Configure PFC Tx thresholds per TC */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		/* Buffer sizes must match ixgbe_dcb_config_packet_buffers_82599(). */
		if (dcb_config->rx_pba_cfg == pba_equal)
			rx_pba_size = IXGBE_RXPBSIZE_64KB;
		else
			rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
			                      : IXGBE_RXPBSIZE_48KB;

		/* XON (low) threshold derived from the buffer size. */
		reg = ((rx_pba_size >> 5) & 0xFFE0);
		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
			reg |= IXGBE_FCRTL_XONE;
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);

		/* XOFF (high) threshold. */
		reg = ((rx_pba_size >> 2) & 0xFFE0);
		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
			reg |= IXGBE_FCRTH_FCEN;
		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

	/* Enable Transmit PFC */
	reg = IXGBE_FCCFG_TFCE_PRIORITY;
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);

	/*
	 * Enable Receive PFC
	 * We will always honor XOFF frames we receive when
	 * we are in PFC mode.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	reg &= ~IXGBE_MFLCN_RFCE;
	reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
out:
	return 0;
}
/**
 * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
 * @hw: pointer to hardware structure
 *
 * Configure queue statistics registers, all queues belonging to same traffic
 * class uses a single set of queue statistics counters.
 */
s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
{
	u32 reg = 0;
	u8 i = 0;

	/*
	 * Receive Queues stats setting
	 * 32 RQSMR registers, each configuring 4 queues.
	 * Set all 16 queues of each TC to the same stat
	 * with TC 'n' going to stat 'n'.
	 */
	for (i = 0; i < 32; i++) {
		/* 0x01010101 replicates the stat index into all 4 byte lanes. */
		reg = 0x01010101 * (i / 4);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
	}
	/*
	 * Transmit Queues stats setting
	 * 32 TQSM registers, each controlling 4 queues.
	 * Set all queues of each TC to the same stat
	 * with TC 'n' going to stat 'n'.
	 * Tx queues are allocated non-uniformly to TCs:
	 * 32, 32, 16, 16, 8, 8, 8, 8.
	 */
	for (i = 0; i < 32; i++) {
		if (i < 8)
			reg = 0x00000000;
		else if (i < 16)
			reg = 0x01010101;
		else if (i < 20)
			reg = 0x02020202;
		else if (i < 24)
			reg = 0x03030303;
		else if (i < 26)
			reg = 0x04040404;
		else if (i < 28)
			reg = 0x05050505;
		else if (i < 30)
			reg = 0x06060606;
		else
			reg = 0x07070707;
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
	}

	return 0;
}
/**
 * ixgbe_dcb_config_82599 - Configure general DCB parameters
 * @hw: pointer to hardware structure
 *
 * Configure general DCB parameters.
 */
s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
{
	u32 reg;
	u32 q;

	/* Disable the Tx desc arbiter so that MTQC can be changed */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg |= IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	/* Enable DCB for Rx with 8 TCs */
	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
	switch (reg & IXGBE_MRQC_MRQE_MASK) {
	case 0:
	case IXGBE_MRQC_RT4TCEN:
		/* RSS disabled cases */
		reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
		break;
	case IXGBE_MRQC_RSSEN:
	case IXGBE_MRQC_RTRSS4TCEN:
		/* RSS enabled cases */
		reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RTRSS8TCEN;
		break;
	default:
		/* Unsupported value, assume stale data, overwrite no RSS */
		reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);

	/* Enable DCB for Tx with 8 TCs */
	reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);

	/* Disable drop for all queues */
	for (q = 0; q < 128; q++)
		IXGBE_WRITE_REG(hw, IXGBE_QDE, q << IXGBE_QDE_IDX_SHIFT);

	/* Enable the Tx desc arbiter */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg &= ~IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	return 0;
}
/**
 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure dcb settings and enable dcb mode.  The call order matters:
 * packet buffers and MRQC/MTQC are programmed before the arbiters.
 */
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
                              struct ixgbe_dcb_config *dcb_config)
{
	ixgbe_dcb_config_packet_buffers_82599(hw, dcb_config);
	ixgbe_dcb_config_82599(hw);
	ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config);
	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config);
	ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config);
	ixgbe_dcb_config_pfc_82599(hw, dcb_config);
	ixgbe_dcb_config_tc_stats_82599(hw);

	return 0;
}
| gpl-2.0 |
Distrotech/linux | drivers/pci/host/pcie-hisi.c | 90 | 4982 | /*
* PCIe host controller driver for HiSilicon Hip05 SoC
*
* Copyright (C) 2015 HiSilicon Co., Ltd. http://www.hisilicon.com
*
* Author: Zhou Wang <wangzhou1@hisilicon.com>
* Dacai Zhu <zhudacai@hisilicon.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "pcie-designware.h"
/* Subctrl register holding the LTSSM state, one copy per port at 0x100 stride. */
#define PCIE_SUBCTRL_SYS_STATE4_REG	0x6818
#define PCIE_LTSSM_LINKUP_STATE		0x11
#define PCIE_LTSSM_STATE_MASK		0x3F

#define to_hisi_pcie(x)	container_of(x, struct hisi_pcie, pp)

struct hisi_pcie {
	struct regmap *subctrl;		/* subsystem control regmap (link state) */
	void __iomem *reg_base;		/* RC config space ("rc_dbi" resource) */
	u32 port_id;			/* 0-3, from the "port-id" DT property */
	struct pcie_port pp;
};
/* Write a 32-bit value to the RC's APB-mapped config space at @reg. */
static inline void hisi_pcie_apb_writel(struct hisi_pcie *pcie,
					u32 val, u32 reg)
{
	writel(val, pcie->reg_base + reg);
}
/* Read a 32-bit value from the RC's APB-mapped config space at @reg. */
static inline u32 hisi_pcie_apb_readl(struct hisi_pcie *pcie, u32 reg)
{
	return readl(pcie->reg_base + reg);
}
/* Hip05 PCIe host only supports 32-bit config access */
static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size,
			      u32 *val)
{
	u32 reg;
	u32 reg_val;
	struct hisi_pcie *pcie = to_hisi_pcie(pp);
	/* Read the aligned dword, then extract the requested bytes. */
	void *walker = &reg_val;

	walker += (where & 0x3);
	reg = where & ~0x3;
	reg_val = hisi_pcie_apb_readl(pcie, reg);

	if (size == 1)
		*val = *(u8 __force *) walker;
	else if (size == 2)
		*val = *(u16 __force *) walker;
	else if (size == 4)
		*val = reg_val;
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}
/* Hip05 PCIe host only supports 32-bit config access */
static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int  size,
				u32 val)
{
	u32 reg_val;
	u32 reg;
	struct hisi_pcie *pcie = to_hisi_pcie(pp);
	/* Sub-dword writes read-modify-write the aligned dword. */
	void *walker = &reg_val;

	walker += (where & 0x3);
	reg = where & ~0x3;
	if (size == 4)
		hisi_pcie_apb_writel(pcie, val, reg);
	else if (size == 2) {
		reg_val = hisi_pcie_apb_readl(pcie, reg);
		*(u16 __force *) walker = val;
		hisi_pcie_apb_writel(pcie, reg_val, reg);
	} else if (size == 1) {
		reg_val = hisi_pcie_apb_readl(pcie, reg);
		*(u8 __force *) walker = val;
		hisi_pcie_apb_writel(pcie, reg_val, reg);
	} else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}
/* Report link-up by reading this port's LTSSM state from the subctrl regmap. */
static int hisi_pcie_link_up(struct pcie_port *pp)
{
	u32 val;
	struct hisi_pcie *hisi_pcie = to_hisi_pcie(pp);

	/* Each port's state register is offset by 0x100. */
	regmap_read(hisi_pcie->subctrl, PCIE_SUBCTRL_SYS_STATE4_REG +
		    0x100 * hisi_pcie->port_id, &val);

	return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE);
}
/* DesignWare host callbacks: own-config accessors and link-state query. */
static struct pcie_host_ops hisi_pcie_host_ops = {
	.rd_own_conf = hisi_pcie_cfg_read,
	.wr_own_conf = hisi_pcie_cfg_write,
	.link_up = hisi_pcie_link_up,
};
/*
 * Read and validate the "port-id" DT property (0-3), attach the host
 * ops and hand off to the DesignWare core for host initialisation.
 */
static int hisi_add_pcie_port(struct pcie_port *pp,
			      struct platform_device *pdev)
{
	int ret;
	u32 port_id;
	struct hisi_pcie *hisi_pcie = to_hisi_pcie(pp);

	if (of_property_read_u32(pdev->dev.of_node, "port-id", &port_id)) {
		dev_err(&pdev->dev, "failed to read port-id\n");
		return -EINVAL;
	}
	if (port_id > 3) {
		dev_err(&pdev->dev, "Invalid port-id: %d\n", port_id);
		return -EINVAL;
	}
	hisi_pcie->port_id = port_id;

	pp->ops = &hisi_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(&pdev->dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}
/*
 * Probe: map the subctrl regmap and the "rc_dbi" register space, then
 * bring up the port via hisi_add_pcie_port().  All resources are devm-
 * managed, so error paths need no manual cleanup.
 */
static int hisi_pcie_probe(struct platform_device *pdev)
{
	struct hisi_pcie *hisi_pcie;
	struct pcie_port *pp;
	struct resource *reg;
	int ret;

	hisi_pcie = devm_kzalloc(&pdev->dev, sizeof(*hisi_pcie), GFP_KERNEL);
	if (!hisi_pcie)
		return -ENOMEM;

	pp = &hisi_pcie->pp;
	pp->dev = &pdev->dev;

	hisi_pcie->subctrl =
	syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl");
	if (IS_ERR(hisi_pcie->subctrl)) {
		dev_err(pp->dev, "cannot get subctrl base\n");
		return PTR_ERR(hisi_pcie->subctrl);
	}

	reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
	hisi_pcie->reg_base = devm_ioremap_resource(&pdev->dev, reg);
	if (IS_ERR(hisi_pcie->reg_base)) {
		dev_err(pp->dev, "cannot get rc_dbi base\n");
		return PTR_ERR(hisi_pcie->reg_base);
	}

	hisi_pcie->pp.dbi_base = hisi_pcie->reg_base;

	ret = hisi_add_pcie_port(pp, pdev);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, hisi_pcie);

	/* NOTE(review): warns on every successful probe; consider dev_info
	 * or dropping it -- confirm this warning is intentional. */
	dev_warn(pp->dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");

	return 0;
}
/* DT match table and platform driver registration. */
static const struct of_device_id hisi_pcie_of_match[] = {
	{.compatible = "hisilicon,hip05-pcie",},
	{},
};

MODULE_DEVICE_TABLE(of, hisi_pcie_of_match);

static struct platform_driver hisi_pcie_driver = {
	.probe  = hisi_pcie_probe,
	.driver = {
		   .name = "hisi-pcie",
		   .of_match_table = hisi_pcie_of_match,
	},
};

module_platform_driver(hisi_pcie_driver);
| gpl-2.0 |
netico-solutions/linux_3.2.0_android_4.2.2 | drivers/net/wireless/ath/ath5k/debug.c | 346 | 31785 | /*
* Copyright (c) 2007-2008 Bruno Randolf <bruno@thinktube.com>
*
* This file is free software: you may copy, redistribute and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation, either version 2 of the License, or (at your
* option) any later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
* This file incorporates work covered by the following copyright and
* permission notice:
*
* Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
* Copyright (c) 2004-2005 Atheros Communications, Inc.
* Copyright (c) 2006 Devicescape Software, Inc.
* Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
* Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include "debug.h"
#include "ath5k.h"
#include "reg.h"
#include "base.h"
/* Initial debug-level bitmask, settable via the "debug" module parameter. */
static unsigned int ath5k_debug;
module_param_named(debug, ath5k_debug, uint, 0);

/* Generic open: stash the inode's private pointer (the ath5k_hw) on the file. */
static int ath5k_debugfs_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
/* debugfs: registers */

/* Name/address pair for one hardware register shown in the dump. */
struct reg {
	const char *name;
	int addr;
};

/* Expands AR5K_FOO into { "AR5K_FOO", AR5K_FOO }. */
#define REG_STRUCT_INIT(r) { #r, r }

/* just a few random registers, might want to add more */
static const struct reg regs[] = {
	REG_STRUCT_INIT(AR5K_CR),
	REG_STRUCT_INIT(AR5K_RXDP),
	REG_STRUCT_INIT(AR5K_CFG),
	REG_STRUCT_INIT(AR5K_IER),
	REG_STRUCT_INIT(AR5K_BCR),
	REG_STRUCT_INIT(AR5K_RTSD0),
	REG_STRUCT_INIT(AR5K_RTSD1),
	REG_STRUCT_INIT(AR5K_TXCFG),
	REG_STRUCT_INIT(AR5K_RXCFG),
	REG_STRUCT_INIT(AR5K_RXJLA),
	REG_STRUCT_INIT(AR5K_MIBC),
	REG_STRUCT_INIT(AR5K_TOPS),
	REG_STRUCT_INIT(AR5K_RXNOFRM),
	REG_STRUCT_INIT(AR5K_TXNOFRM),
	REG_STRUCT_INIT(AR5K_RPGTO),
	REG_STRUCT_INIT(AR5K_RFCNT),
	REG_STRUCT_INIT(AR5K_MISC),
	REG_STRUCT_INIT(AR5K_QCUDCU_CLKGT),
	REG_STRUCT_INIT(AR5K_ISR),
	REG_STRUCT_INIT(AR5K_PISR),
	REG_STRUCT_INIT(AR5K_SISR0),
	REG_STRUCT_INIT(AR5K_SISR1),
	REG_STRUCT_INIT(AR5K_SISR2),
	REG_STRUCT_INIT(AR5K_SISR3),
	REG_STRUCT_INIT(AR5K_SISR4),
	REG_STRUCT_INIT(AR5K_IMR),
	REG_STRUCT_INIT(AR5K_PIMR),
	REG_STRUCT_INIT(AR5K_SIMR0),
	REG_STRUCT_INIT(AR5K_SIMR1),
	REG_STRUCT_INIT(AR5K_SIMR2),
	REG_STRUCT_INIT(AR5K_SIMR3),
	REG_STRUCT_INIT(AR5K_SIMR4),
	REG_STRUCT_INIT(AR5K_DCM_ADDR),
	REG_STRUCT_INIT(AR5K_DCCFG),
	REG_STRUCT_INIT(AR5K_CCFG),
	REG_STRUCT_INIT(AR5K_CPC0),
	REG_STRUCT_INIT(AR5K_CPC1),
	REG_STRUCT_INIT(AR5K_CPC2),
	REG_STRUCT_INIT(AR5K_CPC3),
	REG_STRUCT_INIT(AR5K_CPCOVF),
	REG_STRUCT_INIT(AR5K_RESET_CTL),
	REG_STRUCT_INIT(AR5K_SLEEP_CTL),
	REG_STRUCT_INIT(AR5K_INTPEND),
	REG_STRUCT_INIT(AR5K_SFR),
	REG_STRUCT_INIT(AR5K_PCICFG),
	REG_STRUCT_INIT(AR5K_GPIOCR),
	REG_STRUCT_INIT(AR5K_GPIODO),
	REG_STRUCT_INIT(AR5K_SREV),
};
/* seq_file start: return the entry at *pos, or NULL past the table end. */
static void *reg_start(struct seq_file *seq, loff_t *pos)
{
	return *pos < ARRAY_SIZE(regs) ? (void *)&regs[*pos] : NULL;
}
/* seq_file stop: the static table needs no cleanup. */
static void reg_stop(struct seq_file *seq, void *p)
{
	/* nothing to do */
}
/* seq_file next: advance *pos and return the next entry, or NULL at end. */
static void *reg_next(struct seq_file *seq, void *p, loff_t *pos)
{
	++*pos;
	return *pos < ARRAY_SIZE(regs) ? (void *)&regs[*pos] : NULL;
}
/* seq_file show: print one register's name and its current value. */
static int reg_show(struct seq_file *seq, void *p)
{
	struct ath5k_hw *ah = seq->private;
	struct reg *r = p;
	seq_printf(seq, "%-25s0x%08x\n", r->name,
		   ath5k_hw_reg_read(ah, r->addr));
	return 0;
}
/* Iterator over the regs[] table for the "registers" debugfs file. */
static const struct seq_operations register_seq_ops = {
	.start = reg_start,
	.next  = reg_next,
	.stop  = reg_stop,
	.show  = reg_show
};
static int open_file_registers(struct inode *inode, struct file *file)
{
struct seq_file *s;
int res;
res = seq_open(file, ®ister_seq_ops);
if (res == 0) {
s = file->private_data;
s->private = inode->i_private;
}
return res;
}
/* Read-only seq_file interface for the register dump. */
static const struct file_operations fops_registers = {
	.open = open_file_registers,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
	.owner = THIS_MODULE,
};
/* debugfs: beacons */

/* Dump beacon-related registers (interval, TIM, timers, TSF) as text. */
static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	char buf[500];
	unsigned int len = 0;
	unsigned int v;
	u64 tsf;

	v = ath5k_hw_reg_read(ah, AR5K_BEACON);
	len += snprintf(buf + len, sizeof(buf) - len,
			"%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
			"AR5K_BEACON", v, v & AR5K_BEACON_PERIOD,
			(v & AR5K_BEACON_TIM) >> AR5K_BEACON_TIM_S);

	len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n",
			"AR5K_LAST_TSTP", ath5k_hw_reg_read(ah, AR5K_LAST_TSTP));

	len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n\n",
			"AR5K_BEACON_CNT", ath5k_hw_reg_read(ah, AR5K_BEACON_CNT));

	v = ath5k_hw_reg_read(ah, AR5K_TIMER0);
	len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
			"AR5K_TIMER0 (TBTT)", v, v);

	v = ath5k_hw_reg_read(ah, AR5K_TIMER1);
	len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
			"AR5K_TIMER1 (DMA)", v, v >> 3);

	v = ath5k_hw_reg_read(ah, AR5K_TIMER2);
	len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
			"AR5K_TIMER2 (SWBA)", v, v >> 3);

	v = ath5k_hw_reg_read(ah, AR5K_TIMER3);
	len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
			"AR5K_TIMER3 (ATIM)", v, v);

	tsf = ath5k_hw_get_tsf64(ah);
	len += snprintf(buf + len, sizeof(buf) - len,
			"TSF\t\t0x%016llx\tTU: %08x\n",
			(unsigned long long)tsf, TSF_TO_TU(tsf));

	/* snprintf may report more than it wrote; clamp before copying out. */
	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
/*
 * Parse "enable"/"disable" written to the beacon debugfs file and
 * toggle AR5K_BEACON_ENABLE accordingly.  Unrecognised input is
 * silently accepted (count is always consumed).
 */
static ssize_t write_file_beacon(struct file *file,
				 const char __user *userbuf,
				 size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	char buf[20];
	/* Reserve one byte for the terminator so strncmp never reads
	 * past user data into uninitialized stack memory. */
	size_t len = min(count, sizeof(buf) - 1);

	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;
	buf[len] = '\0';

	if (strncmp(buf, "disable", 7) == 0) {
		AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
		printk(KERN_INFO "debugfs disable beacons\n");
	} else if (strncmp(buf, "enable", 6) == 0) {
		AR5K_REG_ENABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
		printk(KERN_INFO "debugfs enable beacons\n");
	}
	return count;
}
/* Read/write interface for the beacon debugfs file. */
static const struct file_operations fops_beacon = {
	.read = read_file_beacon,
	.write = write_file_beacon,
	.open = ath5k_debugfs_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
/* debugfs: reset */

/* Any write queues the driver's reset work; the data itself is ignored. */
static ssize_t write_file_reset(struct file *file,
				const char __user *userbuf,
				size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "debug file triggered reset\n");
	ieee80211_queue_work(ah->hw, &ah->reset_work);
	return count;
}
/* debugfs "reset" file: write-only trigger, seeking is a no-op. */
static const struct file_operations fops_reset = {
	.write = write_file_reset,
	.open = ath5k_debugfs_open,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};
/* debugfs: debug level */
/* Table mapping each debug level bit to the keyword accepted by the
 * debugfs "debug" file and a human-readable description.  The last
 * entry (ATH5K_DEBUG_ANY, "all") is special-cased by the read/write
 * handlers below. */
static const struct {
	enum ath5k_debug_level level;
	const char *name;
	const char *desc;
} dbg_info[] = {
	{ ATH5K_DEBUG_RESET, "reset", "reset and initialization" },
	{ ATH5K_DEBUG_INTR, "intr", "interrupt handling" },
	{ ATH5K_DEBUG_MODE, "mode", "mode init/setup" },
	{ ATH5K_DEBUG_XMIT, "xmit", "basic xmit operation" },
	{ ATH5K_DEBUG_BEACON, "beacon", "beacon handling" },
	{ ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" },
	{ ATH5K_DEBUG_TXPOWER, "txpower", "transmit power setting" },
	{ ATH5K_DEBUG_LED, "led", "LED management" },
	{ ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
	{ ATH5K_DEBUG_DMA, "dma", "dma start/stop" },
	{ ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
	{ ATH5K_DEBUG_DESC, "desc", "descriptor chains" },
	{ ATH5K_DEBUG_ANY, "all", "show all debug levels" },
};
/* debugfs "debug" read handler: print the current debug level mask and
 * one line per known flag, with '+' marking enabled flags. */
static ssize_t read_file_debug(struct file *file, char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	char buf[700];
	unsigned int len = 0;
	unsigned int i;

	len += snprintf(buf + len, sizeof(buf) - len,
			"DEBUG LEVEL: 0x%08x\n\n", ah->debug.level);

	/* All entries except the last are individual bits, tested with &. */
	for (i = 0; i < ARRAY_SIZE(dbg_info) - 1; i++) {
		len += snprintf(buf + len, sizeof(buf) - len,
				"%10s %c 0x%08x - %s\n", dbg_info[i].name,
				ah->debug.level & dbg_info[i].level ? '+' : ' ',
				dbg_info[i].level, dbg_info[i].desc);
	}
	/* The final "all" entry (ATH5K_DEBUG_ANY) only gets a '+' when
	 * every level is enabled, hence the == comparison. */
	len += snprintf(buf + len, sizeof(buf) - len,
			"%10s %c 0x%08x - %s\n", dbg_info[i].name,
			ah->debug.level == dbg_info[i].level ? '+' : ' ',
			dbg_info[i].level, dbg_info[i].desc);

	/* snprintf() returns the would-be length, so clamp before copying
	 * out. */
	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
/* debugfs "debug" write handler: writing a level keyword from dbg_info
 * toggles the corresponding bit in ah->debug.level.
 *
 * Fix: zero-initialize the buffer and copy at most sizeof(buf) - 1
 * bytes so buf is always NUL terminated and strncmp() never reads
 * uninitialized stack memory on short writes.
 */
static ssize_t write_file_debug(struct file *file,
				const char __user *userbuf,
				size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	unsigned int i;
	char buf[20] = {0};

	if (copy_from_user(buf, userbuf, min(count, sizeof(buf) - 1)))
		return -EFAULT;

	for (i = 0; i < ARRAY_SIZE(dbg_info); i++) {
		if (strncmp(buf, dbg_info[i].name,
			    strlen(dbg_info[i].name)) == 0) {
			ah->debug.level ^= dbg_info[i].level; /* toggle bit */
			break;
		}
	}
	return count;
}
/* debugfs "debug" file: read the level table, write a keyword to
 * toggle a level. */
static const struct file_operations fops_debug = {
	.read = read_file_debug,
	.write = write_file_debug,
	.open = ath5k_debugfs_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
/* debugfs: antenna */
/* debugfs "antenna" read handler: dump the software antenna state,
 * per-antenna RX/TX counters and the relevant antenna-diversity
 * hardware registers. */
static ssize_t read_file_antenna(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	char buf[700];
	unsigned int len = 0;
	unsigned int i;
	unsigned int v;

	len += snprintf(buf + len, sizeof(buf) - len, "antenna mode\t%d\n",
			ah->ah_ant_mode);
	len += snprintf(buf + len, sizeof(buf) - len, "default antenna\t%d\n",
			ah->ah_def_ant);
	len += snprintf(buf + len, sizeof(buf) - len, "tx antenna\t%d\n",
			ah->ah_tx_ant);

	len += snprintf(buf + len, sizeof(buf) - len, "\nANTENNA\t\tRX\tTX\n");
	/* slot 0 holds the "invalid antenna" counters, printed separately
	 * below, so start at 1 here */
	for (i = 1; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
		len += snprintf(buf + len, sizeof(buf) - len,
				"[antenna %d]\t%d\t%d\n",
				i, ah->stats.antenna_rx[i], ah->stats.antenna_tx[i]);
	}
	len += snprintf(buf + len, sizeof(buf) - len, "[invalid]\t%d\t%d\n",
			ah->stats.antenna_rx[0], ah->stats.antenna_tx[0]);

	v = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
	len += snprintf(buf + len, sizeof(buf) - len,
			"\nAR5K_DEFAULT_ANTENNA\t0x%08x\n", v);

	/* decode the individual antenna-related bits of STA_ID1 */
	v = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
	len += snprintf(buf + len, sizeof(buf) - len,
			"AR5K_STA_ID1_DEFAULT_ANTENNA\t%d\n",
			(v & AR5K_STA_ID1_DEFAULT_ANTENNA) != 0);
	len += snprintf(buf + len, sizeof(buf) - len,
			"AR5K_STA_ID1_DESC_ANTENNA\t%d\n",
			(v & AR5K_STA_ID1_DESC_ANTENNA) != 0);
	len += snprintf(buf + len, sizeof(buf) - len,
			"AR5K_STA_ID1_RTS_DEF_ANTENNA\t%d\n",
			(v & AR5K_STA_ID1_RTS_DEF_ANTENNA) != 0);
	len += snprintf(buf + len, sizeof(buf) - len,
			"AR5K_STA_ID1_SELFGEN_DEF_ANT\t%d\n",
			(v & AR5K_STA_ID1_SELFGEN_DEF_ANT) != 0);

	v = ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL);
	len += snprintf(buf + len, sizeof(buf) - len,
			"\nAR5K_PHY_AGCCTL_OFDM_DIV_DIS\t%d\n",
			(v & AR5K_PHY_AGCCTL_OFDM_DIV_DIS) != 0);

	v = ath5k_hw_reg_read(ah, AR5K_PHY_RESTART);
	len += snprintf(buf + len, sizeof(buf) - len,
			"AR5K_PHY_RESTART_DIV_GC\t\t%x\n",
			(v & AR5K_PHY_RESTART_DIV_GC) >> AR5K_PHY_RESTART_DIV_GC_S);

	v = ath5k_hw_reg_read(ah, AR5K_PHY_FAST_ANT_DIV);
	len += snprintf(buf + len, sizeof(buf) - len,
			"AR5K_PHY_FAST_ANT_DIV_EN\t%d\n",
			(v & AR5K_PHY_FAST_ANT_DIV_EN) != 0);

	v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_0);
	len += snprintf(buf + len, sizeof(buf) - len,
			"\nAR5K_PHY_ANT_SWITCH_TABLE_0\t0x%08x\n", v);
	v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_1);
	len += snprintf(buf + len, sizeof(buf) - len,
			"AR5K_PHY_ANT_SWITCH_TABLE_1\t0x%08x\n", v);

	/* snprintf() returns the would-be length; clamp before copying out */
	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
/* debugfs "antenna" write handler: "diversity"/"fixed-a"/"fixed-b"
 * select the antenna mode, "clear" resets the per-antenna counters.
 *
 * Fix: zero-initialize the buffer and copy at most sizeof(buf) - 1
 * bytes so buf is always NUL terminated and strncmp() never reads
 * uninitialized stack memory on short writes.
 */
static ssize_t write_file_antenna(struct file *file,
				  const char __user *userbuf,
				  size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	unsigned int i;
	char buf[20] = {0};

	if (copy_from_user(buf, userbuf, min(count, sizeof(buf) - 1)))
		return -EFAULT;

	if (strncmp(buf, "diversity", 9) == 0) {
		ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
		printk(KERN_INFO "ath5k debug: enable diversity\n");
	} else if (strncmp(buf, "fixed-a", 7) == 0) {
		ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A);
		printk(KERN_INFO "ath5k debugfs: fixed antenna A\n");
	} else if (strncmp(buf, "fixed-b", 7) == 0) {
		ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B);
		printk(KERN_INFO "ath5k debug: fixed antenna B\n");
	} else if (strncmp(buf, "clear", 5) == 0) {
		for (i = 0; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
			ah->stats.antenna_rx[i] = 0;
			ah->stats.antenna_tx[i] = 0;
		}
		printk(KERN_INFO "ath5k debug: cleared antenna stats\n");
	}
	return count;
}
/* debugfs "antenna" file: read diversity state, write mode commands. */
static const struct file_operations fops_antenna = {
	.read = read_file_antenna,
	.write = write_file_antenna,
	.open = ath5k_debugfs_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
/* debugfs: misc */
/* debugfs "misc" read handler: dump the BSSID mask, the RX filter
 * flags (decoded into symbolic names) and the current operating mode.
 *
 * Fix: the PHYERR-5211 branch was missing the "len +=" every sibling
 * line has, so " PHYERR-5211" was written into the buffer but then
 * overwritten by the following snprintf().
 */
static ssize_t read_file_misc(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	char buf[700];
	unsigned int len = 0;
	u32 filt = ath5k_hw_get_rx_filter(ah);

	len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
			ah->bssidmask);
	len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
			filt);
	if (filt & AR5K_RX_FILTER_UCAST)
		len += snprintf(buf + len, sizeof(buf) - len, " UCAST");
	if (filt & AR5K_RX_FILTER_MCAST)
		len += snprintf(buf + len, sizeof(buf) - len, " MCAST");
	if (filt & AR5K_RX_FILTER_BCAST)
		len += snprintf(buf + len, sizeof(buf) - len, " BCAST");
	if (filt & AR5K_RX_FILTER_CONTROL)
		len += snprintf(buf + len, sizeof(buf) - len, " CONTROL");
	if (filt & AR5K_RX_FILTER_BEACON)
		len += snprintf(buf + len, sizeof(buf) - len, " BEACON");
	if (filt & AR5K_RX_FILTER_PROM)
		len += snprintf(buf + len, sizeof(buf) - len, " PROM");
	if (filt & AR5K_RX_FILTER_XRPOLL)
		len += snprintf(buf + len, sizeof(buf) - len, " XRPOLL");
	if (filt & AR5K_RX_FILTER_PROBEREQ)
		len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
	if (filt & AR5K_RX_FILTER_PHYERR_5212)
		len += snprintf(buf + len, sizeof(buf) - len, " PHYERR-5212");
	if (filt & AR5K_RX_FILTER_RADARERR_5212)
		len += snprintf(buf + len, sizeof(buf) - len, " RADARERR-5212");
	if (filt & AR5K_RX_FILTER_PHYERR_5211)
		len += snprintf(buf + len, sizeof(buf) - len, " PHYERR-5211");
	if (filt & AR5K_RX_FILTER_RADARERR_5211)
		len += snprintf(buf + len, sizeof(buf) - len, " RADARERR-5211");

	len += snprintf(buf + len, sizeof(buf) - len, "\nopmode: %s (%d)\n",
			ath_opmode_to_string(ah->opmode), ah->opmode);

	/* snprintf() returns the would-be length; clamp before copying out */
	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
/* debugfs "misc" file: read-only.
 * NOTE(review): unlike the other fops in this file no .llseek is set
 * here -- confirm whether this was intentional or an oversight. */
static const struct file_operations fops_misc = {
	.read = read_file_misc,
	.open = ath5k_debugfs_open,
	.owner = THIS_MODULE,
};
/* debugfs: frameerrors */
/* debugfs "frameerrors" read handler: dump RX/TX error counters with
 * each error class shown as an absolute count and as a percentage of
 * all received/transmitted frames. */
static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	struct ath5k_statistics *st = &ah->stats;
	char buf[700];
	unsigned int len = 0;
	int i;

	len += snprintf(buf + len, sizeof(buf) - len,
			"RX\n---------------------\n");
	/* every percentage guards against rx_all_count == 0 to avoid a
	 * division by zero */
	len += snprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n",
			st->rxerr_crc,
			st->rx_all_count > 0 ?
			st->rxerr_crc * 100 / st->rx_all_count : 0);
	len += snprintf(buf + len, sizeof(buf) - len, "PHY\t%u\t(%u%%)\n",
			st->rxerr_phy,
			st->rx_all_count > 0 ?
			st->rxerr_phy * 100 / st->rx_all_count : 0);
	/* per-code PHY error breakdown; only non-zero codes are printed */
	for (i = 0; i < 32; i++) {
		if (st->rxerr_phy_code[i])
			len += snprintf(buf + len, sizeof(buf) - len,
					" phy_err[%u]\t%u\n",
					i, st->rxerr_phy_code[i]);
	}
	len += snprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n",
			st->rxerr_fifo,
			st->rx_all_count > 0 ?
			st->rxerr_fifo * 100 / st->rx_all_count : 0);
	len += snprintf(buf + len, sizeof(buf) - len, "decrypt\t%u\t(%u%%)\n",
			st->rxerr_decrypt,
			st->rx_all_count > 0 ?
			st->rxerr_decrypt * 100 / st->rx_all_count : 0);
	len += snprintf(buf + len, sizeof(buf) - len, "MIC\t%u\t(%u%%)\n",
			st->rxerr_mic,
			st->rx_all_count > 0 ?
			st->rxerr_mic * 100 / st->rx_all_count : 0);
	len += snprintf(buf + len, sizeof(buf) - len, "process\t%u\t(%u%%)\n",
			st->rxerr_proc,
			st->rx_all_count > 0 ?
			st->rxerr_proc * 100 / st->rx_all_count : 0);
	len += snprintf(buf + len, sizeof(buf) - len, "jumbo\t%u\t(%u%%)\n",
			st->rxerr_jumbo,
			st->rx_all_count > 0 ?
			st->rxerr_jumbo * 100 / st->rx_all_count : 0);
	len += snprintf(buf + len, sizeof(buf) - len, "[RX all\t%u]\n",
			st->rx_all_count);
	len += snprintf(buf + len, sizeof(buf) - len, "RX-all-bytes\t%u\n",
			st->rx_bytes_count);

	len += snprintf(buf + len, sizeof(buf) - len,
			"\nTX\n---------------------\n");
	len += snprintf(buf + len, sizeof(buf) - len, "retry\t%u\t(%u%%)\n",
			st->txerr_retry,
			st->tx_all_count > 0 ?
			st->txerr_retry * 100 / st->tx_all_count : 0);
	len += snprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n",
			st->txerr_fifo,
			st->tx_all_count > 0 ?
			st->txerr_fifo * 100 / st->tx_all_count : 0);
	len += snprintf(buf + len, sizeof(buf) - len, "filter\t%u\t(%u%%)\n",
			st->txerr_filt,
			st->tx_all_count > 0 ?
			st->txerr_filt * 100 / st->tx_all_count : 0);
	len += snprintf(buf + len, sizeof(buf) - len, "[TX all\t%u]\n",
			st->tx_all_count);
	len += snprintf(buf + len, sizeof(buf) - len, "TX-all-bytes\t%u\n",
			st->tx_bytes_count);

	/* snprintf() returns the would-be length; clamp before copying out */
	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
/* debugfs "frameerrors" write handler: writing "clear" resets all
 * RX/TX error statistics.
 *
 * Fix: zero-initialize the buffer and copy at most sizeof(buf) - 1
 * bytes so buf is always NUL terminated and strncmp() never reads
 * uninitialized stack memory on short writes.
 */
static ssize_t write_file_frameerrors(struct file *file,
				      const char __user *userbuf,
				      size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	struct ath5k_statistics *st = &ah->stats;
	char buf[20] = {0};

	if (copy_from_user(buf, userbuf, min(count, sizeof(buf) - 1)))
		return -EFAULT;

	if (strncmp(buf, "clear", 5) == 0) {
		st->rxerr_crc = 0;
		st->rxerr_phy = 0;
		st->rxerr_fifo = 0;
		st->rxerr_decrypt = 0;
		st->rxerr_mic = 0;
		st->rxerr_proc = 0;
		st->rxerr_jumbo = 0;
		st->rx_all_count = 0;
		st->txerr_retry = 0;
		st->txerr_fifo = 0;
		st->txerr_filt = 0;
		st->tx_all_count = 0;
		printk(KERN_INFO "ath5k debug: cleared frameerrors stats\n");
	}
	return count;
}
/* debugfs "frameerrors" file: read the statistics, write "clear" to
 * reset them. */
static const struct file_operations fops_frameerrors = {
	.read = read_file_frameerrors,
	.write = write_file_frameerrors,
	.open = ath5k_debugfs_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
/* debugfs: ani */
/* debugfs "ani" read handler: dump the adaptive-noise-immunity state,
 * the profile counters and the hardware PHY error counters. */
static ssize_t read_file_ani(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	struct ath5k_statistics *st = &ah->stats;
	struct ath5k_ani_state *as = &ah->ani_state;
	char buf[700];
	unsigned int len = 0;

	len += snprintf(buf + len, sizeof(buf) - len,
			"HW has PHY error counters:\t%s\n",
			ah->ah_capabilities.cap_has_phyerr_counters ?
			"yes" : "no");
	len += snprintf(buf + len, sizeof(buf) - len,
			"HW max spur immunity level:\t%d\n",
			as->max_spur_level);
	len += snprintf(buf + len, sizeof(buf) - len,
		"\nANI state\n--------------------------------------------\n");
	len += snprintf(buf + len, sizeof(buf) - len, "operating mode:\t\t\t");
	switch (as->ani_mode) {
	case ATH5K_ANI_MODE_OFF:
		len += snprintf(buf + len, sizeof(buf) - len, "OFF\n");
		break;
	case ATH5K_ANI_MODE_MANUAL_LOW:
		len += snprintf(buf + len, sizeof(buf) - len,
			"MANUAL LOW\n");
		break;
	case ATH5K_ANI_MODE_MANUAL_HIGH:
		len += snprintf(buf + len, sizeof(buf) - len,
			"MANUAL HIGH\n");
		break;
	case ATH5K_ANI_MODE_AUTO:
		len += snprintf(buf + len, sizeof(buf) - len, "AUTO\n");
		break;
	default:
		/* should not happen: any other value indicates corrupted
		 * ANI state */
		len += snprintf(buf + len, sizeof(buf) - len,
			"??? (not good)\n");
		break;
	}
	len += snprintf(buf + len, sizeof(buf) - len,
			"noise immunity level:\t\t%d\n",
			as->noise_imm_level);
	len += snprintf(buf + len, sizeof(buf) - len,
			"spur immunity level:\t\t%d\n",
			as->spur_level);
	len += snprintf(buf + len, sizeof(buf) - len,
			"firstep level:\t\t\t%d\n",
			as->firstep_level);
	len += snprintf(buf + len, sizeof(buf) - len,
			"OFDM weak signal detection:\t%s\n",
			as->ofdm_weak_sig ? "on" : "off");
	len += snprintf(buf + len, sizeof(buf) - len,
			"CCK weak signal detection:\t%s\n",
			as->cck_weak_sig ? "on" : "off");
	len += snprintf(buf + len, sizeof(buf) - len,
			"\nMIB INTERRUPTS:\t\t%u\n",
			st->mib_intr);
	len += snprintf(buf + len, sizeof(buf) - len,
			"beacon RSSI average:\t%d\n",
			(int)ewma_read(&ah->ah_beacon_rssi_avg));

/* Helper for the profile-counter lines below: expands to the raw
 * counter value plus its percentage of the cycle count (0 when no
 * cycles were counted, avoiding a division by zero).  Local to this
 * function, #undef'd right after use. */
#define CC_PRINT(_struct, _field) \
	_struct._field, \
	_struct.cycles > 0 ? \
	_struct._field * 100 / _struct.cycles : 0

	len += snprintf(buf + len, sizeof(buf) - len,
			"profcnt tx\t\t%u\t(%d%%)\n",
			CC_PRINT(as->last_cc, tx_frame));
	len += snprintf(buf + len, sizeof(buf) - len,
			"profcnt rx\t\t%u\t(%d%%)\n",
			CC_PRINT(as->last_cc, rx_frame));
	len += snprintf(buf + len, sizeof(buf) - len,
			"profcnt busy\t\t%u\t(%d%%)\n",
			CC_PRINT(as->last_cc, rx_busy));
#undef CC_PRINT
	len += snprintf(buf + len, sizeof(buf) - len, "profcnt cycles\t\t%u\n",
			as->last_cc.cycles);
	len += snprintf(buf + len, sizeof(buf) - len,
			"listen time\t\t%d\tlast: %d\n",
			as->listen_time, as->last_listen);
	len += snprintf(buf + len, sizeof(buf) - len,
			"OFDM errors\t\t%u\tlast: %u\tsum: %u\n",
			as->ofdm_errors, as->last_ofdm_errors,
			as->sum_ofdm_errors);
	len += snprintf(buf + len, sizeof(buf) - len,
			"CCK errors\t\t%u\tlast: %u\tsum: %u\n",
			as->cck_errors, as->last_cck_errors,
			as->sum_cck_errors);
	/* the hardware counters count up towards ATH5K_PHYERR_CNT_MAX, so
	 * derive the number of errors seen since the counter was armed */
	len += snprintf(buf + len, sizeof(buf) - len,
			"AR5K_PHYERR_CNT1\t%x\t(=%d)\n",
			ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1),
			ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
			ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1)));
	len += snprintf(buf + len, sizeof(buf) - len,
			"AR5K_PHYERR_CNT2\t%x\t(=%d)\n",
			ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2),
			ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
			ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2)));

	/* snprintf() returns the would-be length; clamp before copying out */
	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
/* debugfs "ani" write handler: accept one of the keyword commands
 * below to switch ANI mode or force individual immunity parameters.
 * Note the comparison order matters for overlapping prefixes (e.g.
 * "ani-off" before "ani-on", "sens-high" vs "sens-low").
 *
 * Fix: zero-initialize the buffer and copy at most sizeof(buf) - 1
 * bytes so buf is always NUL terminated and strncmp() never reads
 * uninitialized stack memory on short writes.
 */
static ssize_t write_file_ani(struct file *file,
			      const char __user *userbuf,
			      size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	char buf[20] = {0};

	if (copy_from_user(buf, userbuf, min(count, sizeof(buf) - 1)))
		return -EFAULT;

	if (strncmp(buf, "sens-low", 8) == 0) {
		ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_HIGH);
	} else if (strncmp(buf, "sens-high", 9) == 0) {
		ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_LOW);
	} else if (strncmp(buf, "ani-off", 7) == 0) {
		ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);
	} else if (strncmp(buf, "ani-on", 6) == 0) {
		ath5k_ani_init(ah, ATH5K_ANI_MODE_AUTO);
	} else if (strncmp(buf, "noise-low", 9) == 0) {
		ath5k_ani_set_noise_immunity_level(ah, 0);
	} else if (strncmp(buf, "noise-high", 10) == 0) {
		ath5k_ani_set_noise_immunity_level(ah,
						   ATH5K_ANI_MAX_NOISE_IMM_LVL);
	} else if (strncmp(buf, "spur-low", 8) == 0) {
		ath5k_ani_set_spur_immunity_level(ah, 0);
	} else if (strncmp(buf, "spur-high", 9) == 0) {
		ath5k_ani_set_spur_immunity_level(ah,
						  ah->ani_state.max_spur_level);
	} else if (strncmp(buf, "fir-low", 7) == 0) {
		ath5k_ani_set_firstep_level(ah, 0);
	} else if (strncmp(buf, "fir-high", 8) == 0) {
		ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
	} else if (strncmp(buf, "ofdm-off", 8) == 0) {
		ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
	} else if (strncmp(buf, "ofdm-on", 7) == 0) {
		ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
	} else if (strncmp(buf, "cck-off", 7) == 0) {
		ath5k_ani_set_cck_weak_signal_detection(ah, false);
	} else if (strncmp(buf, "cck-on", 6) == 0) {
		ath5k_ani_set_cck_weak_signal_detection(ah, true);
	}
	return count;
}
/* debugfs "ani" file: read ANI state, write keyword commands. */
static const struct file_operations fops_ani = {
	.read = read_file_ani,
	.write = write_file_ani,
	.open = ath5k_debugfs_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
/* debugfs: queues etc */
/* debugfs "queue" read handler: for every set-up TX queue print its
 * software length, the number of buffers currently chained on it and
 * its stuck counter. */
static ssize_t read_file_queue(struct file *file, char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	char buf[700];
	unsigned int len = 0;
	struct ath5k_txq *txq;
	struct ath5k_buf *bf, *bf0;
	int i, n;

	len += snprintf(buf + len, sizeof(buf) - len,
			"available txbuffers: %d\n", ah->txbuf_len);

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
		txq = &ah->txqs[i];

		len += snprintf(buf + len, sizeof(buf) - len,
			"%02d: %ssetup\n", i, txq->setup ? "" : "not ");

		if (!txq->setup)
			continue;

		/* count queued buffers under the queue lock so the list
		 * cannot change underneath us */
		n = 0;
		spin_lock_bh(&txq->lock);
		list_for_each_entry_safe(bf, bf0, &txq->q, list)
			n++;
		spin_unlock_bh(&txq->lock);

		len += snprintf(buf + len, sizeof(buf) - len,
				"  len: %d bufs: %d\n", txq->txq_len, n);
		len += snprintf(buf + len, sizeof(buf) - len,
				"  stuck: %d\n", txq->txq_stuck);
	}

	/* snprintf() returns the would-be length; clamp before copying out */
	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
/* debugfs "queue" write handler: "start"/"stop" wake or stop all
 * mac80211 queues.
 *
 * Fix: zero-initialize the buffer and copy at most sizeof(buf) - 1
 * bytes so buf is always NUL terminated and strncmp() never reads
 * uninitialized stack memory on short writes.
 */
static ssize_t write_file_queue(struct file *file,
				const char __user *userbuf,
				size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	char buf[20] = {0};

	if (copy_from_user(buf, userbuf, min(count, sizeof(buf) - 1)))
		return -EFAULT;

	if (strncmp(buf, "start", 5) == 0)
		ieee80211_wake_queues(ah->hw);
	else if (strncmp(buf, "stop", 4) == 0)
		ieee80211_stop_queues(ah->hw);

	return count;
}
/* debugfs "queue" file: read queue state, write "start"/"stop". */
static const struct file_operations fops_queue = {
	.read = read_file_queue,
	.write = write_file_queue,
	.open = ath5k_debugfs_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
/* Create the per-device "ath5k" debugfs directory and all the files
 * defined above.  Errors from debugfs_create_file() are intentionally
 * ignored -- debugfs is best effort and the driver works without it. */
void
ath5k_debug_init_device(struct ath5k_hw *ah)
{
	struct dentry *phydir;

	ah->debug.level = ath5k_debug;

	phydir = debugfs_create_dir("ath5k", ah->hw->wiphy->debugfsdir);
	if (!phydir)
		return;

	debugfs_create_file("debug", S_IWUSR | S_IRUSR, phydir, ah,
			    &fops_debug);
	debugfs_create_file("registers", S_IRUSR, phydir, ah, &fops_registers);
	debugfs_create_file("beacon", S_IWUSR | S_IRUSR, phydir, ah,
			    &fops_beacon);
	debugfs_create_file("reset", S_IWUSR, phydir, ah, &fops_reset);
	debugfs_create_file("antenna", S_IWUSR | S_IRUSR, phydir, ah,
			    &fops_antenna);
	debugfs_create_file("misc", S_IRUSR, phydir, ah, &fops_misc);
	debugfs_create_file("frameerrors", S_IWUSR | S_IRUSR, phydir, ah,
			    &fops_frameerrors);
	debugfs_create_file("ani", S_IWUSR | S_IRUSR, phydir, ah, &fops_ani);
	debugfs_create_file("queue", S_IWUSR | S_IRUSR, phydir, ah,
			    &fops_queue);
	debugfs_create_bool("32khz_clock", S_IWUSR | S_IRUSR, phydir,
			    &ah->ah_use_32khz_clock);
}
/* functions used in other places */
/* Dump all supported bands with their channels and bitrates to the
 * kernel log.  Only active when the DUMPBANDS debug level is set. */
void
ath5k_debug_dump_bands(struct ath5k_hw *ah)
{
	unsigned int b, i;

	if (likely(!(ah->debug.level & ATH5K_DEBUG_DUMPBANDS)))
		return;

	BUG_ON(!ah->sbands);

	for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
		struct ieee80211_supported_band *band = &ah->sbands[b];
		char bname[6];

		switch (band->band) {
		case IEEE80211_BAND_2GHZ:
			strcpy(bname, "2 GHz");
			break;
		case IEEE80211_BAND_5GHZ:
			strcpy(bname, "5 GHz");
			break;
		default:
			printk(KERN_DEBUG "Band not supported: %d\n",
				band->band);
			return;
		}
		printk(KERN_DEBUG "Band %s: channels %d, rates %d\n", bname,
				band->n_channels, band->n_bitrates);
		printk(KERN_DEBUG " channels:\n");
		for (i = 0; i < band->n_channels; i++)
			printk(KERN_DEBUG "  %3d %d %.4x %.4x\n",
					ieee80211_frequency_to_channel(
						band->channels[i].center_freq),
					band->channels[i].center_freq,
					band->channels[i].hw_value,
					band->channels[i].flags);
		printk(KERN_DEBUG " rates:\n");
		for (i = 0; i < band->n_bitrates; i++)
			printk(KERN_DEBUG "  %4d %.4x %.4x %.4x\n",
					band->bitrates[i].bitrate,
					band->bitrates[i].hw_value,
					band->bitrates[i].flags,
					band->bitrates[i].hw_value_short);
	}
}
/* Log one RX descriptor: addresses, control/status words and a marker
 * character (' ' = not processed, '*' = ok, '!' = error status). */
static inline void
ath5k_debug_printrxbuf(struct ath5k_buf *bf, int done,
		       struct ath5k_rx_status *rs)
{
	struct ath5k_desc *ds = bf->desc;
	struct ath5k_hw_all_rx_desc *rd = &ds->ud.ds_rx;

	printk(KERN_DEBUG "R (%p %llx) %08x %08x %08x %08x %08x %08x %c\n",
		ds, (unsigned long long)bf->daddr,
		ds->ds_link, ds->ds_data,
		rd->rx_ctl.rx_control_0, rd->rx_ctl.rx_control_1,
		rd->rx_stat.rx_status_0, rd->rx_stat.rx_status_1,
		!done ? ' ' : (rs->rs_status == 0) ? '*' : '!');
}
/* Walk the RX buffer list under rxbuflock and log every descriptor
 * the hardware has completed.  Only active when the DESC debug level
 * is set. */
void
ath5k_debug_printrxbuffs(struct ath5k_hw *ah)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	struct ath5k_rx_status rs = {};
	int status;

	if (likely(!(ah->debug.level & ATH5K_DEBUG_DESC)))
		return;

	printk(KERN_DEBUG "rxdp %x, rxlink %p\n",
		ath5k_hw_get_rxdp(ah), ah->rxlink);

	spin_lock_bh(&ah->rxbuflock);
	list_for_each_entry(bf, &ah->rxbuf, list) {
		ds = bf->desc;
		/* ah_proc_rx_desc() returns 0 once the descriptor has been
		 * completed by the hardware; only those are printed */
		status = ah->ah_proc_rx_desc(ah, ds, &rs);
		if (!status)
			ath5k_debug_printrxbuf(bf, status == 0, &rs);
	}
	spin_unlock_bh(&ah->rxbuflock);
}
/* Log one TX descriptor with its control/status words and a marker
 * character (' ' = done, '*' = pending ok, '!' = error status).  Only
 * active when the DESC debug level is set. */
void
ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct ath5k_desc *ds = bf->desc;
	struct ath5k_hw_5212_tx_desc *td = &ds->ud.ds_tx5212;
	struct ath5k_tx_status ts = {};
	int done;

	if (likely(!(ah->debug.level & ATH5K_DEBUG_DESC)))
		return;

	done = ah->ah_proc_tx_desc(ah, bf->desc, &ts);

	printk(KERN_DEBUG "T (%p %llx) %08x %08x %08x %08x %08x %08x %08x "
		"%08x %c\n", ds, (unsigned long long)bf->daddr, ds->ds_link,
		ds->ds_data, td->tx_ctl.tx_control_0, td->tx_ctl.tx_control_1,
		td->tx_ctl.tx_control_2, td->tx_ctl.tx_control_3,
		td->tx_stat.tx_status_0, td->tx_stat.tx_status_1,
		done ? ' ' : (ts.ts_status == 0) ? '*' : '!');
}
| gpl-2.0 |
andr00ib/stock-v10f-kernel-e730 | arch/sparc/prom/misc_64.c | 602 | 9384 | /*
* misc.c: Miscellaneous prom functions that don't belong
* anywhere else.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/system.h>
#include <asm/ldc.h>
/* Probe the firmware "test" client service: returns 1 when the PROM
 * implements @service_name, 0 otherwise. */
int prom_service_exists(const char *service_name)
{
	unsigned long args[5] = {
		(unsigned long) "test",		/* service to call */
		1,				/* one input argument */
		1,				/* one return value */
		(unsigned long) service_name,
		(unsigned long) -1,		/* result slot */
	};

	p1275_cmd_direct(args);

	/* "test" yields 0 when the named service exists */
	return args[4] ? 0 : 1;
}
/* Tell the hypervisor firmware that this guest supports soft-state
 * reporting, but only if the PROM actually offers the service. */
void prom_sun4v_guest_soft_state(void)
{
	static const char service[] = "SUNW,soft-state-supported";
	unsigned long args[3];

	if (!prom_service_exists(service))
		return;

	args[0] = (unsigned long) service;
	args[1] = 0;	/* no input arguments */
	args[2] = 0;	/* no return values */
	p1275_cmd_direct(args);
}
/* Reset and reboot the machine with the command 'bcommand'. */
void prom_reboot(const char *bcommand)
{
	unsigned long args[4];

#ifdef CONFIG_SUN_LDOMS
	/* under logical domains the reboot is handled by the domain
	 * manager instead of the PROM "boot" service */
	if (ldom_domaining_enabled)
		ldom_reboot(bcommand);
#endif
	args[0] = (unsigned long) "boot";
	args[1] = 1;	/* one input: the boot command string */
	args[2] = 0;	/* no return values */
	args[3] = (unsigned long) bcommand;

	p1275_cmd_direct(args);
}
/* Forth evaluate the expression contained in 'fstring'. */
void prom_feval(const char *fstring)
{
unsigned long args[5];
if (!fstring || fstring[0] == 0)
return;
args[0] = (unsigned long) "interpret";
args[1] = 1;
args[2] = 1;
args[3] = (unsigned long) fstring;
args[4] = (unsigned long) -1;
p1275_cmd_direct(args);
}
EXPORT_SYMBOL(prom_feval);
#ifdef CONFIG_SMP
extern void smp_capture(void);
extern void smp_release(void);
#endif
/* Drop into the prom, with the chance to continue with the 'go'
 * prom command.
 */
void prom_cmdline(void)
{
	unsigned long args[3];
	unsigned long flags;

	/* disable local interrupts and park the other cpus so the PROM
	 * has the machine to itself while we sit at its prompt */
	local_irq_save(flags);

#ifdef CONFIG_SMP
	smp_capture();
#endif

	args[0] = (unsigned long) "enter";
	args[1] = 0;	/* no input arguments */
	args[2] = 0;	/* no return values */

	p1275_cmd_direct(args);

#ifdef CONFIG_SMP
	smp_release();
#endif

	local_irq_restore(flags);
}
/* Drop into the prom, but completely terminate the program.
 * No chance of continuing.
 */
void notrace prom_halt(void)
{
	unsigned long args[3];

#ifdef CONFIG_SUN_LDOMS
	/* under logical domains power-off goes through the domain manager */
	if (ldom_domaining_enabled)
		ldom_power_off();
#endif
again:
	args[0] = (unsigned long) "exit";
	args[1] = 0;	/* no input arguments */
	args[2] = 0;	/* no return values */
	p1275_cmd_direct(args);
	/* "exit" must never return; loop in case the PROM hands control
	 * back anyway */
	goto again; /* PROM is out to get me -DaveM */
}
/* Ask the firmware to power the machine off; falls back to a plain
 * halt if the power-off service does not take effect. */
void prom_halt_power_off(void)
{
	unsigned long args[3];

#ifdef CONFIG_SUN_LDOMS
	if (ldom_domaining_enabled)
		ldom_power_off();
#endif
	args[0] = (unsigned long) "SUNW,power-off";
	args[1] = 0;	/* no input arguments */
	args[2] = 0;	/* no return values */
	p1275_cmd_direct(args);

	/* if nothing else helps, we just halt */
	prom_halt();
}
/* Set prom sync handler to call function 'funcp'. */
void prom_setcallback(callback_func_t funcp)
{
	unsigned long args[5];

	/* a NULL handler is silently ignored */
	if (!funcp)
		return;

	args[0] = (unsigned long) "set-callback";
	args[1] = 1;	/* one input: the handler */
	args[2] = 1;	/* one return value (discarded) */
	args[3] = (unsigned long) funcp;
	args[4] = (unsigned long) -1;

	p1275_cmd_direct(args);
}
/* Get the idprom and stuff it into buffer 'idbuf'.  Returns the
 * format type.  'num_bytes' is the number of bytes that your idbuf
 * has space for.  Returns 0xff on error.
 */
unsigned char prom_get_idprom(char *idbuf, int num_bytes)
{
	int len;

	/* reject when the property is missing or won't fit the buffer */
	len = prom_getproplen(prom_root_node, "idprom");
	if ((len >num_bytes) || (len == -1))
		return 0xff;

	/* the first byte of the property is the idprom format type.
	 * NOTE(review): the property is only accepted when
	 * prom_getproperty() returns 0 -- confirm this matches
	 * prom_getproperty()'s return convention. */
	if (!prom_getproperty(prom_root_node, "idprom", idbuf, num_bytes))
		return idbuf[0];

	return 0xff;
}
/* Return the ihandle of the PROM's MMU node (the /chosen "mmu"
 * property), caching the result; -1 is cached on lookup failure so we
 * don't retry a broken firmware forever. */
int prom_get_mmu_ihandle(void)
{
	int node, ret;

	if (prom_mmu_ihandle_cache != 0)
		return prom_mmu_ihandle_cache;

	node = prom_finddevice(prom_chosen_path);
	ret = prom_getint(node, prom_mmu_name);
	if (ret == -1 || ret == 0)
		prom_mmu_ihandle_cache = -1;
	else
		prom_mmu_ihandle_cache = ret;

	return ret;
}
/* Return the ihandle of the PROM's memory node (the /chosen "memory"
 * property), cached the same way as prom_get_mmu_ihandle(). */
static int prom_get_memory_ihandle(void)
{
	static int memory_ihandle_cache;
	int node, ret;

	if (memory_ihandle_cache != 0)
		return memory_ihandle_cache;

	node = prom_finddevice("/chosen");
	ret = prom_getint(node, "memory");
	if (ret == -1 || ret == 0)
		memory_ihandle_cache = -1;
	else
		memory_ihandle_cache = ret;

	return ret;
}
/* Load explicit I/D TLB entries. */
static long tlb_load(const char *type, unsigned long index,
		     unsigned long tte_data, unsigned long vaddr)
{
	unsigned long args[9];

	args[0] = (unsigned long) prom_callmethod_name;
	args[1] = 5;	/* five input arguments */
	args[2] = 1;	/* one return value */
	args[3] = (unsigned long) type;	/* "SUNW,itlb-load" or "SUNW,dtlb-load" */
	args[4] = (unsigned int) prom_get_mmu_ihandle();
	args[5] = vaddr;
	args[6] = tte_data;
	args[7] = index;
	args[8] = (unsigned long) -1;	/* result slot */

	p1275_cmd_direct(args);

	return (long) args[8];
}
/* Load one instruction-TLB entry via the PROM. */
long prom_itlb_load(unsigned long index,
		    unsigned long tte_data,
		    unsigned long vaddr)
{
	return tlb_load("SUNW,itlb-load", index, tte_data, vaddr);
}
/* Load one data-TLB entry via the PROM. */
long prom_dtlb_load(unsigned long index,
		    unsigned long tte_data,
		    unsigned long vaddr)
{
	return tlb_load("SUNW,dtlb-load", index, tte_data, vaddr);
}
/* Ask the PROM MMU to map [vaddr, vaddr+size) to physical @paddr with
 * the given mode.  Returns a negative/firmware status, never 0 (a 0
 * reply is normalized to -1 so callers can test for failure). */
int prom_map(int mode, unsigned long size,
	     unsigned long vaddr, unsigned long paddr)
{
	unsigned long args[11];
	int ret;

	args[0] = (unsigned long) prom_callmethod_name;
	args[1] = 7;	/* seven input arguments */
	args[2] = 1;	/* one return value */
	args[3] = (unsigned long) prom_map_name;
	args[4] = (unsigned int) prom_get_mmu_ihandle();
	args[5] = (unsigned int) mode;
	args[6] = size;
	args[7] = vaddr;
	args[8] = 0;	/* phys_high: unused on 64-bit */
	args[9] = paddr;
	args[10] = (unsigned long) -1;	/* result slot */

	p1275_cmd_direct(args);

	ret = (int) args[10];
	if (ret == 0)
		ret = -1;
	return ret;
}
/* Tear down a PROM MMU mapping previously created with prom_map(). */
void prom_unmap(unsigned long size, unsigned long vaddr)
{
	unsigned long args[7];

	args[0] = (unsigned long) prom_callmethod_name;
	args[1] = 4;	/* four input arguments */
	args[2] = 0;	/* no return values */
	args[3] = (unsigned long) prom_unmap_name;
	args[4] = (unsigned int) prom_get_mmu_ihandle();
	args[5] = size;
	args[6] = vaddr;

	p1275_cmd_direct(args);
}
/* Set aside physical memory which is not touched or modified
 * across soft resets.
 */
int prom_retain(const char *name, unsigned long size,
		unsigned long align, unsigned long *paddr)
{
	unsigned long args[11];

	args[0] = (unsigned long) prom_callmethod_name;
	args[1] = 5;	/* five input arguments */
	args[2] = 3;	/* three return values: status, phys_high, phys_low */
	args[3] = (unsigned long) "SUNW,retain";
	args[4] = (unsigned int) prom_get_memory_ihandle();
	args[5] = align;
	args[6] = size;
	args[7] = (unsigned long) name;
	args[8] = (unsigned long) -1;
	args[9] = (unsigned long) -1;
	args[10] = (unsigned long) -1;

	p1275_cmd_direct(args);

	/* non-zero status from the firmware means failure */
	if (args[8])
		return (int) args[8];

	/* Next we get "phys_high" then "phys_low".  On 64-bit
	 * the phys_high cell is don't care since the phys_low
	 * cell has the full value.
	 */
	*paddr = args[10];

	return 0;
}
/* Get "Unumber" string for the SIMM at the given
 * memory address.  Usually this will be of the form
 * "Uxxxx" where xxxx is a decimal number which is
 * etched into the motherboard next to the SIMM slot
 * in question.
 */
int prom_getunumber(int syndrome_code,
		    unsigned long phys_addr,
		    char *buf, int buflen)
{
	unsigned long args[12];

	args[0] = (unsigned long) prom_callmethod_name;
	args[1] = 7;	/* seven input arguments */
	args[2] = 2;	/* two return values */
	args[3] = (unsigned long) "SUNW,get-unumber";
	args[4] = (unsigned int) prom_get_memory_ihandle();
	args[5] = buflen;
	args[6] = (unsigned long) buf;
	args[7] = 0;	/* phys_high: unused on 64-bit */
	args[8] = phys_addr;
	args[9] = (unsigned int) syndrome_code;
	args[10] = (unsigned long) -1;	/* status result slot */
	args[11] = (unsigned long) -1;

	p1275_cmd_direct(args);

	return (int) args[10];
}
/* Power management extensions. */

/* Put the calling cpu to sleep via the firmware. */
void prom_sleepself(void)
{
	unsigned long args[3] = {
		(unsigned long) "SUNW,sleep-self",
		0,	/* no input arguments */
		0,	/* no return values */
	};

	p1275_cmd_direct(args);
}
/* Put the whole system to sleep; returns the firmware status code. */
int prom_sleepsystem(void)
{
	unsigned long args[4] = {
		(unsigned long) "SUNW,sleep-system",
		0,			/* no input arguments */
		1,			/* one return value */
		(unsigned long) -1,	/* result slot */
	};

	p1275_cmd_direct(args);
	return (int) args[3];
}
/* Wake the system back up; returns the firmware status code. */
int prom_wakeupsystem(void)
{
	unsigned long args[4] = {
		(unsigned long) "SUNW,wakeup-system",
		0,			/* no input arguments */
		1,			/* one return value */
		(unsigned long) -1,	/* result slot */
	};

	p1275_cmd_direct(args);
	return (int) args[3];
}
#ifdef CONFIG_SMP
/* Start the cpu identified by device-tree node @cpunode executing at
 * @pc with @arg in its argument register. */
void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg)
{
	unsigned long args[6] = {
		(unsigned long) "SUNW,start-cpu",
		3,	/* three input arguments */
		0,	/* no return values */
		(unsigned int) cpunode,
		pc,
		arg,
	};

	p1275_cmd_direct(args);
}
/* Like prom_startcpu(), but the target is named by cpu id instead of
 * device-tree node. */
void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg)
{
	unsigned long args[6] = {
		(unsigned long) "SUNW,start-cpu-by-cpuid",
		3,	/* three input arguments */
		0,	/* no return values */
		(unsigned int) cpuid,
		pc,
		arg,
	};

	p1275_cmd_direct(args);
}
/* Stop the cpu with the given cpu id. */
void prom_stopcpu_cpuid(int cpuid)
{
	unsigned long args[4] = {
		(unsigned long) "SUNW,stop-cpu-by-cpuid",
		1,	/* one input argument */
		0,	/* no return values */
		(unsigned int) cpuid,
	};

	p1275_cmd_direct(args);
}
/* Stop the calling cpu. */
void prom_stopself(void)
{
	unsigned long args[3] = {
		(unsigned long) "SUNW,stop-self",
		0,	/* no input arguments */
		0,	/* no return values */
	};

	p1275_cmd_direct(args);
}
/* Idle the calling cpu (it can be resumed with prom_resumecpu()). */
void prom_idleself(void)
{
	unsigned long args[3] = {
		(unsigned long) "SUNW,idle-self",
		0,	/* no input arguments */
		0,	/* no return values */
	};

	p1275_cmd_direct(args);
}
/* Resume the idled cpu identified by device-tree node @cpunode. */
void prom_resumecpu(int cpunode)
{
	unsigned long args[4] = {
		(unsigned long) "SUNW,resume-cpu",
		1,	/* one input argument */
		0,	/* no return values */
		(unsigned int) cpunode,
	};

	p1275_cmd_direct(args);
}
#endif
| gpl-2.0 |
XePeleato/android_ALE-L21_kernel | drivers/gpu/arm/malit6_64/midgard/mali_kbase_pm_demand.c | 602 | 1716 | /*
*
* (C) COPYRIGHT ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* A copy of the licence is included with the program, and can also be obtained
* from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/
/**
* @file mali_kbase_pm_demand.c
* A simple demand based power management policy
*/
#include <mali_kbase.h>
#include <mali_kbase_pm.h>
#if KBASE_PM_EN
/* Demand policy: power only the shader cores that jobs currently need
 * or use; with no active users, request no cores at all. */
static u64 demand_get_core_mask(struct kbase_device *kbdev)
{
	if (0 == kbdev->pm.active_count)
		return 0;

	return kbdev->shader_needed_bitmap | kbdev->shader_inuse_bitmap;
}
/* Demand policy: the GPU front-end stays active exactly while there
 * are active users of the device. */
static mali_bool demand_get_core_active(struct kbase_device *kbdev)
{
	return kbdev->pm.active_count ? MALI_TRUE : MALI_FALSE;
}
/* Policy init hook: the demand policy keeps no per-device state. */
static void demand_init(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}
/* Policy termination hook: nothing was allocated in demand_init(), so
 * there is nothing to tear down. */
static void demand_term(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}
/** The @ref struct kbase_pm_policy structure for the demand power policy.
 *
 * This is the static structure that defines the demand power policy's
 * callbacks and name.  Fields are positional; the trailing comments name
 * the member each initializer fills.
 */
const struct kbase_pm_policy kbase_pm_demand_policy_ops = {
	"demand",			/* name */
	demand_init,			/* init */
	demand_term,			/* term */
	demand_get_core_mask,		/* get_core_mask */
	demand_get_core_active,		/* get_core_active */
	0u,				/* flags */
	KBASE_PM_POLICY_ID_DEMAND,	/* id */
};
KBASE_EXPORT_TEST_API(kbase_pm_demand_policy_ops)
#endif /* KBASE_PM_EN */
| gpl-2.0 |
MotoG-2013/android_kernel_motorola_msm8226 | fs/ecryptfs/crypto.c | 858 | 69316 | /**
* eCryptfs: Linux filesystem encryption layer
*
* Copyright (C) 1997-2004 Erez Zadok
* Copyright (C) 2001-2004 Stony Brook University
* Copyright (C) 2004-2007 International Business Machines Corp.
* Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
* Michael C. Thompson <mcthomps@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
* 02111-1307, USA.
*/
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/compiler.h>
#include <linux/key.h>
#include <linux/namei.h>
#include <linux/crypto.h>
#include <linux/file.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include "ecryptfs_kernel.h"
static int
ecryptfs_decrypt_page_offset(struct ecryptfs_crypt_stat *crypt_stat,
struct page *dst_page, int dst_offset,
struct page *src_page, int src_offset, int size,
unsigned char *iv);
static int
ecryptfs_encrypt_page_offset(struct ecryptfs_crypt_stat *crypt_stat,
struct page *dst_page, int dst_offset,
struct page *src_page, int src_offset, int size,
unsigned char *iv);
/**
 * ecryptfs_to_hex
 * @dst: Buffer to take hex character representation of contents of
 *       src; must be at least of size (src_size * 2)
 * @src: Buffer to be converted to a hex string representation
 * @src_size: number of bytes to convert
 *
 * Writes two lowercase hex digits per source byte.  sprintf() also
 * NUL-terminates after each pair, so @dst ends up NUL-terminated as a
 * side effect; callers must still size @dst for 2 * @src_size chars.
 */
void ecryptfs_to_hex(char *dst, char *src, size_t src_size)
{
	size_t x;

	/* size_t index: the old "int x < size_t src_size" loop mixed
	 * signed and unsigned operands in the comparison. */
	for (x = 0; x < src_size; x++)
		sprintf(&dst[x * 2], "%.2x", (unsigned char)src[x]);
}
/**
 * ecryptfs_from_hex
 * @dst: Buffer to take the bytes from src hex; must be at least of
 *       size (src_size / 2)
 * @src: Buffer to be converted from a hex string representation to raw value
 * @dst_size: size of dst buffer, or number of hex character pairs to convert
 *
 * Decodes each two-character hex pair of @src into one raw byte of @dst.
 */
void ecryptfs_from_hex(char *dst, char *src, int dst_size)
{
	int i;
	char pair[3];

	pair[2] = '\0';			/* terminator stays fixed for every pair */
	for (i = 0; i < dst_size; i++) {
		pair[0] = src[i * 2];
		pair[1] = src[i * 2 + 1];
		dst[i] = (unsigned char)simple_strtol(pair, NULL, 16);
	}
}
/**
 * ecryptfs_calculate_md5 - calculates the md5 of @src
 * @dst: Pointer to 16 bytes of allocated memory
 * @crypt_stat: Pointer to crypt_stat struct for the current inode
 * @src: Data to be md5'd
 * @len: Length of @src
 *
 * Uses the allocated crypto context that crypt_stat references to
 * generate the MD5 sum of the contents of src.  The hash transform is
 * allocated lazily on first use and cached in crypt_stat->hash_tfm;
 * cs_hash_tfm_mutex serializes both that allocation and the
 * init/update/final sequence.
 *
 * Returns zero on success; negative error code otherwise.
 */
static int ecryptfs_calculate_md5(char *dst,
				  struct ecryptfs_crypt_stat *crypt_stat,
				  char *src, int len)
{
	struct scatterlist sg;
	struct hash_desc desc = {
		.tfm = crypt_stat->hash_tfm,	/* may be NULL on first call */
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP
	};
	int rc = 0;

	mutex_lock(&crypt_stat->cs_hash_tfm_mutex);
	sg_init_one(&sg, (u8 *)src, len);
	if (!desc.tfm) {
		/* First use: allocate the MD5 transform and cache it on
		 * the crypt_stat so later calls reuse it. */
		desc.tfm = crypto_alloc_hash(ECRYPTFS_DEFAULT_HASH, 0,
					     CRYPTO_ALG_ASYNC);
		if (IS_ERR(desc.tfm)) {
			rc = PTR_ERR(desc.tfm);
			ecryptfs_printk(KERN_ERR, "Error attempting to "
					"allocate crypto context; rc = [%d]\n",
					rc);
			goto out;
		}
		crypt_stat->hash_tfm = desc.tfm;
	}
	rc = crypto_hash_init(&desc);
	if (rc) {
		printk(KERN_ERR
		       "%s: Error initializing crypto hash; rc = [%d]\n",
		       __func__, rc);
		goto out;
	}
	rc = crypto_hash_update(&desc, &sg, len);
	if (rc) {
		printk(KERN_ERR
		       "%s: Error updating crypto hash; rc = [%d]\n",
		       __func__, rc);
		goto out;
	}
	rc = crypto_hash_final(&desc, dst);
	if (rc) {
		printk(KERN_ERR
		       "%s: Error finalizing crypto hash; rc = [%d]\n",
		       __func__, rc);
		goto out;
	}
out:
	mutex_unlock(&crypt_stat->cs_hash_tfm_mutex);
	return rc;
}
/* Build the crypto-API algorithm name "<chaining_modifier>(<cipher_name>)"
 * (e.g. "cbc(aes)") into a freshly kmalloc'd buffer.  The caller owns and
 * must kfree() *algified_name on success.
 *
 * Returns zero on success; -ENOMEM if allocation fails.
 */
static int ecryptfs_crypto_api_algify_cipher_name(char **algified_name,
						  char *cipher_name,
						  char *chaining_modifier)
{
	/* "(" + ")" + NUL account for the extra 3 bytes */
	int len = strlen(chaining_modifier) + strlen(cipher_name) + 3;

	*algified_name = kmalloc(len, GFP_KERNEL);
	if (!*algified_name)
		return -ENOMEM;
	snprintf(*algified_name, len, "%s(%s)", chaining_modifier,
		 cipher_name);
	return 0;
}
/**
 * ecryptfs_derive_iv
 * @iv: destination for the derived iv value (crypt_stat->iv_bytes bytes)
 * @crypt_stat: Pointer to crypt_stat struct for the current inode
 * @offset: Offset of the extent whose IV we are to derive
 *
 * Generate the initialization vector from the given root IV and page
 * offset: the IV is the first iv_bytes of MD5(root_iv || ascii(offset)).
 *
 * Returns zero on success; non-zero on error.
 */
int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat,
		       loff_t offset)
{
	int rc = 0;
	char dst[MD5_DIGEST_SIZE];
	/* root IV followed by up to 16 bytes of decimal offset string */
	char src[ECRYPTFS_MAX_IV_BYTES + 16];

	if (unlikely(ecryptfs_verbosity > 0)) {
		ecryptfs_printk(KERN_DEBUG, "root iv:\n");
		ecryptfs_dump_hex(crypt_stat->root_iv, crypt_stat->iv_bytes);
	}
	/* TODO: It is probably secure to just cast the least
	 * significant bits of the root IV into an unsigned long and
	 * add the offset to that rather than go through all this
	 * hashing business. -Halcrow */
	memcpy(src, crypt_stat->root_iv, crypt_stat->iv_bytes);
	memset((src + crypt_stat->iv_bytes), 0, 16);
	snprintf((src + crypt_stat->iv_bytes), 16, "%lld", offset);
	if (unlikely(ecryptfs_verbosity > 0)) {
		ecryptfs_printk(KERN_DEBUG, "source:\n");
		ecryptfs_dump_hex(src, (crypt_stat->iv_bytes + 16));
	}
	rc = ecryptfs_calculate_md5(dst, crypt_stat, src,
				    (crypt_stat->iv_bytes + 16));
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error attempting to compute "
				"MD5 while generating IV for a page\n");
		goto out;
	}
	/* Truncate the digest to the configured IV width. */
	memcpy(iv, dst, crypt_stat->iv_bytes);
	if (unlikely(ecryptfs_verbosity > 0)) {
		ecryptfs_printk(KERN_DEBUG, "derived iv:\n");
		ecryptfs_dump_hex(iv, crypt_stat->iv_bytes);
	}
out:
	return rc;
}
/**
 * ecryptfs_init_crypt_stat
 * @crypt_stat: Pointer to the crypt_stat struct to initialize.
 *
 * Initialize the crypt_stat structure: zero it, set up the keysig list
 * and all of its mutexes, and mark it initialized so later sanity
 * checks (BUG_ON against ECRYPTFS_STRUCT_INITIALIZED) pass.
 */
void
ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
{
	memset((void *)crypt_stat, 0, sizeof(struct ecryptfs_crypt_stat));
	INIT_LIST_HEAD(&crypt_stat->keysig_list);
	mutex_init(&crypt_stat->keysig_list_mutex);
	mutex_init(&crypt_stat->cs_mutex);
	mutex_init(&crypt_stat->cs_tfm_mutex);
	mutex_init(&crypt_stat->cs_hash_tfm_mutex);
	crypt_stat->flags |= ECRYPTFS_STRUCT_INITIALIZED;
}
/**
 * ecryptfs_destroy_crypt_stat
 * @crypt_stat: Pointer to the crypt_stat struct to destroy.
 *
 * Releases all memory associated with a crypt_stat struct: the block
 * cipher and hash transforms, every queued key signature, and finally
 * zeroes the struct (which also wipes the cached key material).
 */
void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
{
	struct ecryptfs_key_sig *key_sig, *key_sig_tmp;

	if (crypt_stat->tfm)
		crypto_free_blkcipher(crypt_stat->tfm);
	if (crypt_stat->hash_tfm)
		crypto_free_hash(crypt_stat->hash_tfm);
	list_for_each_entry_safe(key_sig, key_sig_tmp,
				 &crypt_stat->keysig_list, crypt_stat_list) {
		list_del(&key_sig->crypt_stat_list);
		kmem_cache_free(ecryptfs_key_sig_cache, key_sig);
	}
	memset(crypt_stat, 0, sizeof(struct ecryptfs_crypt_stat));
}
/* Tear down a mount-wide crypt stat: drop every global auth tok (and
 * its key reference, unless the tok was marked invalid) and zero the
 * structure.  No-op if the struct was never initialized. */
void ecryptfs_destroy_mount_crypt_stat(
	struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
	struct ecryptfs_global_auth_tok *auth_tok, *auth_tok_tmp;

	if (!(mount_crypt_stat->flags & ECRYPTFS_MOUNT_CRYPT_STAT_INITIALIZED))
		return;
	mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex);
	list_for_each_entry_safe(auth_tok, auth_tok_tmp,
				 &mount_crypt_stat->global_auth_tok_list,
				 mount_crypt_stat_list) {
		list_del(&auth_tok->mount_crypt_stat_list);
		/* Invalid toks never held a validated key reference. */
		if (auth_tok->global_auth_tok_key
		    && !(auth_tok->flags & ECRYPTFS_AUTH_TOK_INVALID))
			key_put(auth_tok->global_auth_tok_key);
		kmem_cache_free(ecryptfs_global_auth_tok_cache, auth_tok);
	}
	mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex);
	memset(mount_crypt_stat, 0, sizeof(struct ecryptfs_mount_crypt_stat));
}
/**
 * virt_to_scatterlist
 * @addr: Virtual address
 * @size: Size of data; should be an even multiple of the block size
 * @sg: Pointer to scatterlist array; set to NULL to obtain only
 *      the number of scatterlist structs required in array
 * @sg_size: Max array size
 *
 * Fills in a scatterlist array with page references for a passed
 * virtual address.
 *
 * Returns the number of scatterlist structs in array used, or -ENOMEM
 * if @sg_size entries are not enough to cover @size bytes.
 */
int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
			int sg_size)
{
	int i = 0;
	struct page *pg;
	int offset;
	int remainder_of_page;

	/* The documented contract allows sg == NULL (count-only mode),
	 * but the old code called sg_init_table() unconditionally,
	 * dereferencing the NULL array.  Guard it like every other
	 * sg access below. */
	if (sg)
		sg_init_table(sg, sg_size);
	while (size > 0 && i < sg_size) {
		pg = virt_to_page(addr);
		offset = offset_in_page(addr);
		if (sg)
			sg_set_page(&sg[i], pg, 0, offset);
		remainder_of_page = PAGE_CACHE_SIZE - offset;
		if (size >= remainder_of_page) {
			if (sg)
				sg[i].length = remainder_of_page;
			addr += remainder_of_page;
			size -= remainder_of_page;
		} else {
			if (sg)
				sg[i].length = size;
			addr += size;
			size = 0;
		}
		i++;
	}
	if (size > 0)
		return -ENOMEM;
	return i;
}
/**
 * encrypt_scatterlist
 * @crypt_stat: Cryptographic context (tfm, key) for the operation
 * @dest_sg: Destination of encrypted data
 * @src_sg: Data to be encrypted
 * @size: Length of data to be encrypted
 * @iv: iv to use during encryption
 *
 * Returns the number of bytes encrypted; negative value on error
 */
static int encrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
			       struct scatterlist *dest_sg,
			       struct scatterlist *src_sg, int size,
			       unsigned char *iv)
{
	struct blkcipher_desc desc = {
		.tfm = crypt_stat->tfm,
		.info = iv,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP
	};
	int rc = 0;

	BUG_ON(!crypt_stat || !crypt_stat->tfm
	       || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
	if (unlikely(ecryptfs_verbosity > 0)) {
		ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
				crypt_stat->key_size);
		ecryptfs_dump_hex(crypt_stat->key,
				  crypt_stat->key_size);
	}
	/* Consider doing this once, when the file is opened */
	mutex_lock(&crypt_stat->cs_tfm_mutex);
	if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
		rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
					     crypt_stat->key_size);
		if (rc) {
			ecryptfs_printk(KERN_ERR,
					"Error setting key; rc = [%d]\n", rc);
			mutex_unlock(&crypt_stat->cs_tfm_mutex);
			rc = -EINVAL;
			goto out;
		}
		/* Only cache "key programmed" on success; the old code
		 * set ECRYPTFS_KEY_SET even when setkey failed, making
		 * every later call skip setkey with an unset key. */
		crypt_stat->flags |= ECRYPTFS_KEY_SET;
	}
	ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes.\n", size);
	/* Propagate cipher errors instead of silently dropping the
	 * return value as before; mirror decrypt_scatterlist(). */
	rc = crypto_blkcipher_encrypt_iv(&desc, dest_sg, src_sg, size);
	mutex_unlock(&crypt_stat->cs_tfm_mutex);
	if (rc) {
		ecryptfs_printk(KERN_ERR, "Error encrypting; rc = [%d]\n",
				rc);
		goto out;
	}
	rc = size;
out:
	return rc;
}
/**
 * ecryptfs_lower_offset_for_extent
 *
 * Convert an eCryptfs extent index into a byte offset in the lower
 * file: the metadata header comes first, followed by fixed-size
 * extents.
 */
static void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num,
					struct ecryptfs_crypt_stat *crypt_stat)
{
	loff_t header_size = ecryptfs_lower_header_size(crypt_stat);

	*offset = header_size + crypt_stat->extent_size * extent_num;
}
/**
 * ecryptfs_encrypt_extent
 * @enc_extent_page: Allocated page into which to encrypt the data in
 *                   @page
 * @crypt_stat: crypt_stat containing cryptographic context for the
 *              encryption operation
 * @page: Page containing plaintext data extent to encrypt
 * @extent_offset: Page extent offset for use in generating IV
 *
 * Encrypts one extent of data: derives the per-extent IV from the
 * file-global extent index, then encrypts extent_size bytes from
 * @page into offset 0 of @enc_extent_page.
 *
 * Return zero on success; non-zero otherwise
 */
static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
				   struct ecryptfs_crypt_stat *crypt_stat,
				   struct page *page,
				   unsigned long extent_offset)
{
	loff_t extent_base;
	char extent_iv[ECRYPTFS_MAX_IV_BYTES];
	int rc;

	/* Index of the first extent covered by this page. */
	extent_base = (((loff_t)page->index)
		       * (PAGE_CACHE_SIZE / crypt_stat->extent_size));
	rc = ecryptfs_derive_iv(extent_iv, crypt_stat,
				(extent_base + extent_offset));
	if (rc) {
		ecryptfs_printk(KERN_ERR, "Error attempting to derive IV for "
				"extent [0x%.16llx]; rc = [%d]\n",
				(unsigned long long)(extent_base + extent_offset), rc);
		goto out;
	}
	rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
					  page, (extent_offset
						 * crypt_stat->extent_size),
					  crypt_stat->extent_size, extent_iv);
	if (rc < 0) {
		printk(KERN_ERR "%s: Error attempting to encrypt page with "
		       "page->index = [%ld], extent_offset = [%ld]; "
		       "rc = [%d]\n", __func__, page->index, extent_offset,
		       rc);
		goto out;
	}
	rc = 0;
out:
	return rc;
}
/**
 * ecryptfs_encrypt_page
 * @page: Page mapped from the eCryptfs inode for the file; contains
 *        decrypted content that needs to be encrypted (to a temporary
 *        page; not in place) and written out to the lower file
 *
 * Encrypt an eCryptfs page. This is done on a per-extent basis. Note
 * that eCryptfs pages may straddle the lower pages -- for instance,
 * if the file was created on a machine with an 8K page size
 * (resulting in an 8K header), and then the file is copied onto a
 * host with a 32K page size, then when reading page 0 of the eCryptfs
 * file, 24K of page 0 of the lower file will be read and decrypted,
 * and then 8K of page 1 of the lower file will be read and decrypted.
 *
 * Each extent is encrypted into a single scratch page and immediately
 * written to its offset in the lower file before the next extent is
 * processed.
 *
 * Returns zero on success; negative on error
 */
int ecryptfs_encrypt_page(struct page *page)
{
	struct inode *ecryptfs_inode;
	struct ecryptfs_crypt_stat *crypt_stat;
	char *enc_extent_virt;
	struct page *enc_extent_page = NULL;
	loff_t extent_offset;
	int rc = 0;

	ecryptfs_inode = page->mapping->host;
	crypt_stat =
		&(ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat);
	BUG_ON(!(crypt_stat->flags & ECRYPTFS_ENCRYPTED));
	/* Scratch page reused for every extent of this page. */
	enc_extent_page = alloc_page(GFP_USER);
	if (!enc_extent_page) {
		rc = -ENOMEM;
		ecryptfs_printk(KERN_ERR, "Error allocating memory for "
				"encrypted extent\n");
		goto out;
	}
	enc_extent_virt = kmap(enc_extent_page);
	for (extent_offset = 0;
	     extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
	     extent_offset++) {
		loff_t offset;

		rc = ecryptfs_encrypt_extent(enc_extent_page, crypt_stat, page,
					     extent_offset);
		if (rc) {
			printk(KERN_ERR "%s: Error encrypting extent; "
			       "rc = [%d]\n", __func__, rc);
			goto out;
		}
		/* Map (page index, extent in page) to the global extent
		 * number, then to the lower-file byte offset. */
		ecryptfs_lower_offset_for_extent(
			&offset, ((((loff_t)page->index)
				   * (PAGE_CACHE_SIZE
				      / crypt_stat->extent_size))
				  + extent_offset), crypt_stat);
		rc = ecryptfs_write_lower(ecryptfs_inode, enc_extent_virt,
					  offset, crypt_stat->extent_size);
		if (rc < 0) {
			ecryptfs_printk(KERN_ERR, "Error attempting "
					"to write lower page; rc = [%d]"
					"\n", rc);
			goto out;
		}
	}
	rc = 0;
out:
	if (enc_extent_page) {
		kunmap(enc_extent_page);
		__free_page(enc_extent_page);
	}
	return rc;
}
/* Decrypt one extent: derive the per-extent IV from the file-global
 * extent index, then decrypt extent_size bytes from offset 0 of
 * @enc_extent_page into the extent's slot in @page.  Mirror image of
 * ecryptfs_encrypt_extent().  Returns zero on success. */
static int ecryptfs_decrypt_extent(struct page *page,
				   struct ecryptfs_crypt_stat *crypt_stat,
				   struct page *enc_extent_page,
				   unsigned long extent_offset)
{
	loff_t extent_base;
	char extent_iv[ECRYPTFS_MAX_IV_BYTES];
	int rc;

	/* Index of the first extent covered by this page. */
	extent_base = (((loff_t)page->index)
		       * (PAGE_CACHE_SIZE / crypt_stat->extent_size));
	rc = ecryptfs_derive_iv(extent_iv, crypt_stat,
				(extent_base + extent_offset));
	if (rc) {
		ecryptfs_printk(KERN_ERR, "Error attempting to derive IV for "
				"extent [0x%.16llx]; rc = [%d]\n",
				(unsigned long long)(extent_base + extent_offset), rc);
		goto out;
	}
	rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
					  (extent_offset
					   * crypt_stat->extent_size),
					  enc_extent_page, 0,
					  crypt_stat->extent_size, extent_iv);
	if (rc < 0) {
		printk(KERN_ERR "%s: Error attempting to decrypt to page with "
		       "page->index = [%ld], extent_offset = [%ld]; "
		       "rc = [%d]\n", __func__, page->index, extent_offset,
		       rc);
		goto out;
	}
	rc = 0;
out:
	return rc;
}
/**
* ecryptfs_decrypt_page
* @page: Page mapped from the eCryptfs inode for the file; data read
* and decrypted from the lower file will be written into this
* page
*
* Decrypt an eCryptfs page. This is done on a per-extent basis. Note
* that eCryptfs pages may straddle the lower pages -- for instance,
* if the file was created on a machine with an 8K page size
* (resulting in an 8K header), and then the file is copied onto a
* host with a 32K page size, then when reading page 0 of the eCryptfs
* file, 24K of page 0 of the lower file will be read and decrypted,
* and then 8K of page 1 of the lower file will be read and decrypted.
*
* Returns zero on success; negative on error
*/
int ecryptfs_decrypt_page(struct page *page)
{
struct inode *ecryptfs_inode;
struct ecryptfs_crypt_stat *crypt_stat;
char *enc_extent_virt;
struct page *enc_extent_page = NULL;
unsigned long extent_offset;
int rc = 0;
ecryptfs_inode = page->mapping->host;
crypt_stat =
&(ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat);
BUG_ON(!(crypt_stat->flags & ECRYPTFS_ENCRYPTED));
enc_extent_page = alloc_page(GFP_USER);
if (!enc_extent_page) {
rc = -ENOMEM;
ecryptfs_printk(KERN_ERR, "Error allocating memory for "
"encrypted extent\n");
goto out;
}
enc_extent_virt = kmap(enc_extent_page);
for (extent_offset = 0;
extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
extent_offset++) {
loff_t offset;
ecryptfs_lower_offset_for_extent(
&offset, ((page->index * (PAGE_CACHE_SIZE
/ crypt_stat->extent_size))
+ extent_offset), crypt_stat);
rc = ecryptfs_read_lower(enc_extent_virt, offset,
crypt_stat->extent_size,
ecryptfs_inode);
if (rc < 0) {
ecryptfs_printk(KERN_ERR, "Error attempting "
"to read lower page; rc = [%d]"
"\n", rc);
goto out;
}
rc = ecryptfs_decrypt_extent(page, crypt_stat, enc_extent_page,
extent_offset);
if (rc) {
printk(KERN_ERR "%s: Error encrypting extent; "
"rc = [%d]\n", __func__, rc);
goto out;
}
}
out:
if (enc_extent_page) {
kunmap(enc_extent_page);
__free_page(enc_extent_page);
}
return rc;
}
/**
 * decrypt_scatterlist
 * @crypt_stat: Cryptographic context
 * @dest_sg: The destination scatterlist to decrypt into
 * @src_sg: The source scatterlist to decrypt from
 * @size: The number of bytes to decrypt
 * @iv: The initialization vector to use for the decryption
 *
 * Sets the key on every call (unlike encrypt_scatterlist, which caches
 * it behind ECRYPTFS_KEY_SET), then performs the CBC decryption under
 * cs_tfm_mutex.
 *
 * Returns the number of bytes decrypted; negative value on error
 */
static int decrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
			       struct scatterlist *dest_sg,
			       struct scatterlist *src_sg, int size,
			       unsigned char *iv)
{
	struct blkcipher_desc desc = {
		.tfm = crypt_stat->tfm,
		.info = iv,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP
	};
	int rc = 0;

	/* Consider doing this once, when the file is opened */
	mutex_lock(&crypt_stat->cs_tfm_mutex);
	rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
				     crypt_stat->key_size);
	if (rc) {
		ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n",
				rc);
		mutex_unlock(&crypt_stat->cs_tfm_mutex);
		rc = -EINVAL;
		goto out;
	}
	ecryptfs_printk(KERN_DEBUG, "Decrypting [%d] bytes.\n", size);
	rc = crypto_blkcipher_decrypt_iv(&desc, dest_sg, src_sg, size);
	mutex_unlock(&crypt_stat->cs_tfm_mutex);
	if (rc) {
		ecryptfs_printk(KERN_ERR, "Error decrypting; rc = [%d]\n",
				rc);
		goto out;
	}
	rc = size;
out:
	return rc;
}
/**
 * ecryptfs_encrypt_page_offset
 * @crypt_stat: The cryptographic context
 * @dst_page: The page to encrypt into
 * @dst_offset: The offset in the page to encrypt into
 * @src_page: The page to encrypt from
 * @src_offset: The offset in the page to encrypt from
 * @size: The number of bytes to encrypt
 * @iv: The initialization vector to use for the encryption
 *
 * Wraps the two page/offset pairs in single-entry scatterlists and
 * hands them to encrypt_scatterlist().
 *
 * Returns the number of bytes encrypted
 */
static int
ecryptfs_encrypt_page_offset(struct ecryptfs_crypt_stat *crypt_stat,
			     struct page *dst_page, int dst_offset,
			     struct page *src_page, int src_offset, int size,
			     unsigned char *iv)
{
	struct scatterlist src_sg, dst_sg;

	sg_init_table(&src_sg, 1);
	sg_set_page(&src_sg, src_page, size, src_offset);
	sg_init_table(&dst_sg, 1);
	sg_set_page(&dst_sg, dst_page, size, dst_offset);
	return encrypt_scatterlist(crypt_stat, &dst_sg, &src_sg, size, iv);
}
/**
 * ecryptfs_decrypt_page_offset
 * @crypt_stat: The cryptographic context
 * @dst_page: The page to decrypt into
 * @dst_offset: The offset in the page to decrypt into
 * @src_page: The page to decrypt from
 * @src_offset: The offset in the page to decrypt from
 * @size: The number of bytes to decrypt
 * @iv: The initialization vector to use for the decryption
 *
 * Wraps the two page/offset pairs in single-entry scatterlists and
 * hands them to decrypt_scatterlist().
 *
 * Returns the number of bytes decrypted
 */
static int
ecryptfs_decrypt_page_offset(struct ecryptfs_crypt_stat *crypt_stat,
			     struct page *dst_page, int dst_offset,
			     struct page *src_page, int src_offset, int size,
			     unsigned char *iv)
{
	struct scatterlist src_sg, dst_sg;

	sg_init_table(&src_sg, 1);
	sg_init_table(&dst_sg, 1);
	sg_set_page(&src_sg, src_page, size, src_offset);
	sg_set_page(&dst_sg, dst_page, size, dst_offset);
	return decrypt_scatterlist(crypt_stat, &dst_sg, &src_sg, size, iv);
}
#define ECRYPTFS_MAX_SCATTERLIST_LEN 4
/**
 * ecryptfs_init_crypt_ctx
 * @crypt_stat: Uninitialized crypt stats structure
 *
 * Initialize the crypto context: build the "cbc(<cipher>)" algorithm
 * name and allocate the block cipher transform into crypt_stat->tfm.
 * Idempotent -- returns immediately with success if a tfm already
 * exists.
 *
 * TODO: Performance: Keep a cache of initialized cipher contexts;
 * only init if needed
 */
int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
{
	char *full_alg_name;
	int rc = -EINVAL;

	if (!crypt_stat->cipher) {
		ecryptfs_printk(KERN_ERR, "No cipher specified\n");
		goto out;
	}
	ecryptfs_printk(KERN_DEBUG,
			"Initializing cipher [%s]; strlen = [%d]; "
			"key_size_bits = [%zd]\n",
			crypt_stat->cipher, (int)strlen(crypt_stat->cipher),
			crypt_stat->key_size << 3);
	if (crypt_stat->tfm) {
		rc = 0;
		goto out;
	}
	mutex_lock(&crypt_stat->cs_tfm_mutex);
	rc = ecryptfs_crypto_api_algify_cipher_name(&full_alg_name,
						    crypt_stat->cipher, "cbc");
	if (rc)
		goto out_unlock;
	crypt_stat->tfm = crypto_alloc_blkcipher(full_alg_name, 0,
						 CRYPTO_ALG_ASYNC);
	/* The name buffer is only needed for the allocation call. */
	kfree(full_alg_name);
	if (IS_ERR(crypt_stat->tfm)) {
		rc = PTR_ERR(crypt_stat->tfm);
		/* Leave a clean NULL rather than an ERR_PTR for the
		 * destroy path's "if (crypt_stat->tfm)" check. */
		crypt_stat->tfm = NULL;
		ecryptfs_printk(KERN_ERR, "cryptfs: init_crypt_ctx(): "
				"Error initializing cipher [%s]\n",
				crypt_stat->cipher);
		goto out_unlock;
	}
	crypto_blkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
	rc = 0;
out_unlock:
	mutex_unlock(&crypt_stat->cs_tfm_mutex);
out:
	return rc;
}
/* Derive extent_shift (log2 of the power-of-two extent size) and
 * extent_mask (all-ones above the extent boundary) from extent_size.
 * Leaves mask = 0xFFFFFFFF and shift = 0 if extent_size is zero.
 */
static void set_extent_mask_and_shift(struct ecryptfs_crypt_stat *crypt_stat)
{
	int tmp = crypt_stat->extent_size;

	crypt_stat->extent_mask = 0xFFFFFFFF;
	crypt_stat->extent_shift = 0;
	if (tmp == 0)
		return;
	/* Strip trailing zero bits, widening the mask as we go. */
	while (!(tmp & 0x01)) {
		tmp >>= 1;
		crypt_stat->extent_mask <<= 1;
		crypt_stat->extent_shift++;
	}
}
/* Install default extent size, IV width, and metadata size on a
 * crypt_stat.  The metadata region lives in an xattr (minimum header
 * extent size) or at the head of the lower file (at least one page). */
void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat)
{
	/* Default values; may be overwritten as we are parsing the
	 * packets. */
	crypt_stat->extent_size = ECRYPTFS_DEFAULT_EXTENT_SIZE;
	set_extent_mask_and_shift(crypt_stat);
	crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES;
	if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
		crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
	else {
		if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
			crypt_stat->metadata_size =
				ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
		else
			crypt_stat->metadata_size = PAGE_CACHE_SIZE;
	}
}
/**
 * ecryptfs_compute_root_iv
 * @crypt_stat: crypt_stat with a valid session key and iv_bytes set
 *
 * Derives the root IV as the first iv_bytes of MD5(session key).
 * Requires ECRYPTFS_KEY_VALID; on any error, sets the root IV to all
 * 0's and flags ECRYPTFS_SECURITY_WARNING.
 *
 * Returns zero on success; negative error code otherwise.
 */
int ecryptfs_compute_root_iv(struct ecryptfs_crypt_stat *crypt_stat)
{
	int rc = 0;
	char dst[MD5_DIGEST_SIZE];

	BUG_ON(crypt_stat->iv_bytes > MD5_DIGEST_SIZE);
	BUG_ON(crypt_stat->iv_bytes <= 0);
	if (!(crypt_stat->flags & ECRYPTFS_KEY_VALID)) {
		rc = -EINVAL;
		ecryptfs_printk(KERN_WARNING, "Session key not valid; "
				"cannot generate root IV\n");
		goto out;
	}
	rc = ecryptfs_calculate_md5(dst, crypt_stat, crypt_stat->key,
				    crypt_stat->key_size);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error attempting to compute "
				"MD5 while generating root IV\n");
		goto out;
	}
	memcpy(crypt_stat->root_iv, dst, crypt_stat->iv_bytes);
out:
	if (rc) {
		/* Fail "open": zero IV plus an explicit warning flag. */
		memset(crypt_stat->root_iv, 0, crypt_stat->iv_bytes);
		crypt_stat->flags |= ECRYPTFS_SECURITY_WARNING;
	}
	return rc;
}
/* Generate a fresh random session key of key_size bytes, mark it
 * valid, and derive the root IV from it.  Note the return value of
 * ecryptfs_compute_root_iv() is intentionally unchecked here: on
 * failure it zeroes the root IV and raises ECRYPTFS_SECURITY_WARNING
 * itself. */
static void ecryptfs_generate_new_key(struct ecryptfs_crypt_stat *crypt_stat)
{
	get_random_bytes(crypt_stat->key, crypt_stat->key_size);
	crypt_stat->flags |= ECRYPTFS_KEY_VALID;
	ecryptfs_compute_root_iv(crypt_stat);
	if (unlikely(ecryptfs_verbosity > 0)) {
		ecryptfs_printk(KERN_DEBUG, "Generated new session key:\n");
		ecryptfs_dump_hex(crypt_stat->key,
				  crypt_stat->key_size);
	}
}
/**
* ecryptfs_copy_mount_wide_flags_to_inode_flags
* @crypt_stat: The inode's cryptographic context
* @mount_crypt_stat: The mount point's cryptographic context
*
* This function propagates the mount-wide flags to individual inode
* flags.
*/
static void ecryptfs_copy_mount_wide_flags_to_inode_flags(
struct ecryptfs_crypt_stat *crypt_stat,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
if (mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED)
crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
crypt_stat->flags |= ECRYPTFS_VIEW_AS_ENCRYPTED;
if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
crypt_stat->flags |= ECRYPTFS_ENCRYPT_FILENAMES;
if (mount_crypt_stat->flags
& ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK)
crypt_stat->flags |= ECRYPTFS_ENCFN_USE_MOUNT_FNEK;
else if (mount_crypt_stat->flags
& ECRYPTFS_GLOBAL_ENCFN_USE_FEK)
crypt_stat->flags |= ECRYPTFS_ENCFN_USE_FEK;
}
}
/* Copy each mount-wide (non-FNEK) auth tok signature onto the inode's
 * keysig list so the inode's session key can later be wrapped for every
 * mount-wide key.  Both lists are locked for the duration.  Returns
 * zero on success, or the first ecryptfs_add_keysig() error. */
static int ecryptfs_copy_mount_wide_sigs_to_inode_sigs(
	struct ecryptfs_crypt_stat *crypt_stat,
	struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
	struct ecryptfs_global_auth_tok *global_auth_tok;
	int rc = 0;

	mutex_lock(&crypt_stat->keysig_list_mutex);
	mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex);
	list_for_each_entry(global_auth_tok,
			    &mount_crypt_stat->global_auth_tok_list,
			    mount_crypt_stat_list) {
		/* FNEK toks are for filename encryption only. */
		if (global_auth_tok->flags & ECRYPTFS_AUTH_TOK_FNEK)
			continue;
		rc = ecryptfs_add_keysig(crypt_stat, global_auth_tok->sig);
		if (rc) {
			printk(KERN_ERR "Error adding keysig; rc = [%d]\n", rc);
			goto out;
		}
	}
out:
	mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex);
	mutex_unlock(&crypt_stat->keysig_list_mutex);
	return rc;
}
/**
 * ecryptfs_set_default_crypt_stat_vals
 * @crypt_stat: The inode's cryptographic context
 * @mount_crypt_stat: The mount point's cryptographic context
 *
 * Default values in the event that policy does not override them:
 * mount-wide flags propagated, default sizes, default cipher and key
 * size, key marked not-yet-valid, and current file format version.
 */
static void ecryptfs_set_default_crypt_stat_vals(
	struct ecryptfs_crypt_stat *crypt_stat,
	struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
	ecryptfs_copy_mount_wide_flags_to_inode_flags(crypt_stat,
						      mount_crypt_stat);
	ecryptfs_set_default_sizes(crypt_stat);
	strcpy(crypt_stat->cipher, ECRYPTFS_DEFAULT_CIPHER);
	crypt_stat->key_size = ECRYPTFS_DEFAULT_KEY_BYTES;
	crypt_stat->flags &= ~(ECRYPTFS_KEY_VALID);
	crypt_stat->file_version = ECRYPTFS_FILE_VERSION;
	crypt_stat->mount_crypt_stat = mount_crypt_stat;
}
/**
 * ecryptfs_new_file_context
 * @ecryptfs_inode: The eCryptfs inode
 *
 * If the crypto context for the file has not yet been established,
 * this is where we do that. Establishing a new crypto context
 * involves the following decisions:
 *  - What cipher to use?
 *  - What set of authentication tokens to use?
 * Here we just worry about getting enough information into the
 * authentication tokens so that we know that they are available.
 * We associate the available authentication tokens with the new file
 * via the set of signatures in the crypt_stat struct. Later, when
 * the headers are actually written out, we may again defer to
 * userspace to perform the encryption of the session key; for the
 * foreseeable future, this will be the case with public key packets.
 *
 * Returns zero on success; non-zero otherwise
 */
int ecryptfs_new_file_context(struct inode *ecryptfs_inode)
{
	struct ecryptfs_crypt_stat *crypt_stat =
	    &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
	struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
	    &ecryptfs_superblock_to_private(
		    ecryptfs_inode->i_sb)->mount_crypt_stat;
	int cipher_name_len;
	int rc = 0;

	ecryptfs_set_default_crypt_stat_vals(crypt_stat, mount_crypt_stat);
	crypt_stat->flags |= (ECRYPTFS_ENCRYPTED | ECRYPTFS_KEY_VALID);
	ecryptfs_copy_mount_wide_flags_to_inode_flags(crypt_stat,
						      mount_crypt_stat);
	rc = ecryptfs_copy_mount_wide_sigs_to_inode_sigs(crypt_stat,
							 mount_crypt_stat);
	if (rc) {
		printk(KERN_ERR "Error attempting to copy mount-wide key sigs "
		       "to the inode key sigs; rc = [%d]\n", rc);
		goto out;
	}
	/* Override the defaults with the mount-wide cipher choice. */
	cipher_name_len =
		strlen(mount_crypt_stat->global_default_cipher_name);
	memcpy(crypt_stat->cipher,
	       mount_crypt_stat->global_default_cipher_name,
	       cipher_name_len);
	crypt_stat->cipher[cipher_name_len] = '\0';
	crypt_stat->key_size =
		mount_crypt_stat->global_default_cipher_key_size;
	ecryptfs_generate_new_key(crypt_stat);
	rc = ecryptfs_init_crypt_ctx(crypt_stat);
	if (rc)
		ecryptfs_printk(KERN_ERR, "Error initializing cryptographic "
				"context for cipher [%s]: rc = [%d]\n",
				crypt_stat->cipher, rc);
out:
	return rc;
}
/**
 * ecryptfs_validate_marker - check for the ecryptfs marker
 * @data: The data block in which to check
 *
 * The marker is stored as a random 32-bit word followed by that word
 * XORed with MAGIC_ECRYPTFS_MARKER; validate the pair accordingly.
 *
 * Returns zero if marker found; -EINVAL if not found
 */
static int ecryptfs_validate_marker(char *data)
{
	u32 m_1 = get_unaligned_be32(data);
	u32 m_2 = get_unaligned_be32(data + 4);

	if (m_2 == (m_1 ^ MAGIC_ECRYPTFS_MARKER))
		return 0;
	ecryptfs_printk(KERN_DEBUG, "m_1 = [0x%.8x]; m_2 = [0x%.8x]; "
			"MAGIC_ECRYPTFS_MARKER = [0x%.8x]\n", m_1, m_2,
			MAGIC_ECRYPTFS_MARKER);
	ecryptfs_printk(KERN_DEBUG, "(m_1 ^ MAGIC_ECRYPTFS_MARKER) = "
			"[0x%.8x]\n", (m_1 ^ MAGIC_ECRYPTFS_MARKER));
	return -EINVAL;
}
/* One on-disk flag bit paired with the in-memory crypt_stat flag it
 * maps to. */
struct ecryptfs_flag_map_elem {
	u32 file_flag;	/* bit as stored in the file header */
	u32 local_flag;	/* corresponding crypt_stat->flags bit */
};

/* Add support for additional flags by adding elements here. */
static struct ecryptfs_flag_map_elem ecryptfs_flag_map[] = {
	{0x00000001, ECRYPTFS_ENABLE_HMAC},
	{0x00000002, ECRYPTFS_ENCRYPTED},
	{0x00000004, ECRYPTFS_METADATA_IN_XATTR},
	{0x00000008, ECRYPTFS_ENCRYPT_FILENAMES}
};
/**
 * ecryptfs_process_flags
 * @crypt_stat: The cryptographic context
 * @page_virt: Source data to be parsed
 * @bytes_read: Updated with the number of bytes read
 *
 * Translates the on-disk 32-bit flag word into crypt_stat flags via
 * ecryptfs_flag_map, and extracts the file format version from the
 * top byte.
 *
 * Returns zero on success; non-zero if the flag set is invalid
 */
static int ecryptfs_process_flags(struct ecryptfs_crypt_stat *crypt_stat,
				  char *page_virt, int *bytes_read)
{
	u32 flags = get_unaligned_be32(page_virt);
	int i;

	for (i = 0; i < ARRAY_SIZE(ecryptfs_flag_map); i++) {
		if (flags & ecryptfs_flag_map[i].file_flag)
			crypt_stat->flags |= ecryptfs_flag_map[i].local_flag;
		else
			crypt_stat->flags &= ~(ecryptfs_flag_map[i].local_flag);
	}
	/* Version is in top 8 bits of the 32-bit flag vector */
	crypt_stat->file_version = (flags >> 24) & 0xFF;
	*bytes_read = 4;
	return 0;
}
/**
 * write_ecryptfs_marker
 * @page_virt: The pointer to in a page to begin writing the marker
 * @written: Number of bytes written
 *
 * Marker = 0x3c81b7f5.  Stored as a random 32-bit word followed by
 * that word XORed with the magic constant, so the raw magic value
 * never appears on disk.
 */
static void write_ecryptfs_marker(char *page_virt, size_t *written)
{
	u32 m_1, m_2;

	get_random_bytes(&m_1, (MAGIC_ECRYPTFS_MARKER_SIZE_BYTES / 2));
	m_2 = (m_1 ^ MAGIC_ECRYPTFS_MARKER);
	put_unaligned_be32(m_1, page_virt);
	page_virt += (MAGIC_ECRYPTFS_MARKER_SIZE_BYTES / 2);
	put_unaligned_be32(m_2, page_virt);
	(*written) = MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
}
/* Serialize the crypt_stat flags (via ecryptfs_flag_map) plus the file
 * format version (top byte) into a big-endian 32-bit word at
 * @page_virt; *written is set to the 4 bytes emitted.  Inverse of
 * ecryptfs_process_flags(). */
void ecryptfs_write_crypt_stat_flags(char *page_virt,
				     struct ecryptfs_crypt_stat *crypt_stat,
				     size_t *written)
{
	u32 flags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(ecryptfs_flag_map); i++) {
		if (crypt_stat->flags & ecryptfs_flag_map[i].local_flag)
			flags |= ecryptfs_flag_map[i].file_flag;
	}
	/* Version is in top 8 bits of the 32-bit flag vector */
	flags |= ((((u8)crypt_stat->file_version) << 24) & 0xFF000000);
	put_unaligned_be32(flags, page_virt);
	*written = 4;
}
struct ecryptfs_cipher_code_str_map_elem {
char cipher_str[16];
u8 cipher_code;
};
/* Add support for additional ciphers by adding elements here. The
* cipher_code is whatever OpenPGP applicatoins use to identify the
* ciphers. List in order of probability. */
static struct ecryptfs_cipher_code_str_map_elem
ecryptfs_cipher_code_str_map[] = {
	{"aes",RFC2440_CIPHER_AES_128 },
	{"blowfish", RFC2440_CIPHER_BLOWFISH},
	{"des3_ede", RFC2440_CIPHER_DES3_EDE},
	{"cast5", RFC2440_CIPHER_CAST_5},
	{"twofish", RFC2440_CIPHER_TWOFISH},
	{"cast6", RFC2440_CIPHER_CAST_6},
	{"aes", RFC2440_CIPHER_AES_192},
	{"aes", RFC2440_CIPHER_AES_256}
	/* "aes" appears three times; ecryptfs_code_for_cipher_string()
	 * disambiguates the code by key length. */
};
/**
* ecryptfs_code_for_cipher_string
* @cipher_name: The string alias for the cipher
* @key_bytes: Length of key in bytes; used for AES code selection
*
* Returns zero on no match, or the cipher code on match
*/
u8 ecryptfs_code_for_cipher_string(char *cipher_name, size_t key_bytes)
{
	struct ecryptfs_cipher_code_str_map_elem *map =
		ecryptfs_cipher_code_str_map;
	int i;

	if (strcmp(cipher_name, "aes") != 0) {
		/* Non-AES ciphers map directly by name; first match wins */
		for (i = 0; i < ARRAY_SIZE(ecryptfs_cipher_code_str_map); i++)
			if (strcmp(cipher_name, map[i].cipher_str) == 0)
				return map[i].cipher_code;
		return 0;
	}
	/* The AES code depends on the key length */
	switch (key_bytes) {
	case 16:
		return RFC2440_CIPHER_AES_128;
	case 24:
		return RFC2440_CIPHER_AES_192;
	case 32:
		return RFC2440_CIPHER_AES_256;
	default:
		return 0;
	}
}
/**
* ecryptfs_cipher_code_to_string
* @str: Destination to write out the cipher name
* @cipher_code: The code to convert to cipher name string
*
* Returns zero on success
*/
/**
 * ecryptfs_cipher_code_to_string
 * @str: Destination to write out the cipher name
 * @cipher_code: The code to convert to cipher name string
 *
 * Looks @cipher_code up in ecryptfs_cipher_code_str_map and copies the
 * matching name into @str. @str must be large enough for any name in
 * the map (16 bytes).
 *
 * Returns zero on success; -EINVAL if the code is not recognized
 */
int ecryptfs_cipher_code_to_string(char *str, u8 cipher_code)
{
	int rc = 0;
	int i;

	str[0] = '\0';
	for (i = 0; i < ARRAY_SIZE(ecryptfs_cipher_code_str_map); i++)
		if (cipher_code == ecryptfs_cipher_code_str_map[i].cipher_code) {
			strcpy(str, ecryptfs_cipher_code_str_map[i].cipher_str);
			/* Codes are unique in the map; stop at first hit */
			break;
		}
	if (str[0] == '\0') {
		ecryptfs_printk(KERN_WARNING, "Cipher code not recognized: "
				"[%d]\n", cipher_code);
		rc = -EINVAL;
	}
	return rc;
}
/* Reads the size-plus-marker prefix from the start of the lower file
 * and validates the eCryptfs marker; on success, initializes the
 * eCryptfs inode's i_size from the stored size field.
 * Returns 0 on success, -EINVAL on short read or bad marker, or the
 * negative error from the lower read. */
int ecryptfs_read_and_validate_header_region(struct inode *inode)
{
	u8 file_size[ECRYPTFS_SIZE_AND_MARKER_BYTES];
	u8 *marker = file_size + ECRYPTFS_FILE_SIZE_BYTES;
	int rc;

	rc = ecryptfs_read_lower(file_size, 0, ECRYPTFS_SIZE_AND_MARKER_BYTES,
				 inode);
	if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
		return rc >= 0 ? -EINVAL : rc;
	rc = ecryptfs_validate_marker(marker);
	if (!rc)
		ecryptfs_i_size_init(file_size, inode);
	return rc;
}
/* Serializes the header-extent size (4 bytes, big-endian) followed by
 * the number of header extents at the front of the file (2 bytes,
 * big-endian) into @virt; always writes 6 bytes. */
void
ecryptfs_write_header_metadata(char *virt,
			       struct ecryptfs_crypt_stat *crypt_stat,
			       size_t *written)
{
	u32 extent_size = (u32)crypt_stat->extent_size;
	u16 extents_at_front =
		(u16)(crypt_stat->metadata_size / crypt_stat->extent_size);

	put_unaligned_be32(extent_size, virt);
	put_unaligned_be16(extents_at_front, virt + 4);
	*written = 6;
}
/* Slab cache for the page-sized buffers used to read file headers */
struct kmem_cache *ecryptfs_header_cache;
/**
* ecryptfs_write_headers_virt
* @page_virt: The virtual address to write the headers to
* @max: The size of memory allocated at page_virt
* @size: Set to the number of bytes written by this function
* @crypt_stat: The cryptographic context
* @ecryptfs_dentry: The eCryptfs dentry
*
* Format version: 1
*
* Header Extent:
* Octets 0-7: Unencrypted file size (big-endian)
* Octets 8-15: eCryptfs special marker
* Octets 16-19: Flags
* Octet 16: File format version number (between 0 and 255)
* Octets 17-18: Reserved
* Octet 19: Bit 1 (lsb): Reserved
* Bit 2: Encrypted?
* Bits 3-8: Reserved
* Octets 20-23: Header extent size (big-endian)
* Octets 24-25: Number of header extents at front of file
* (big-endian)
* Octet 26: Begin RFC 2440 authentication token packet set
* Data Extent 0:
* Lower data (CBC encrypted)
* Data Extent 1:
* Lower data (CBC encrypted)
* ...
*
* Returns zero on success
*/
static int ecryptfs_write_headers_virt(char *page_virt, size_t max,
				       size_t *size,
				       struct ecryptfs_crypt_stat *crypt_stat,
				       struct dentry *ecryptfs_dentry)
{
	int rc;
	size_t written;
	size_t offset;

	/* Octets 0-7 (unencrypted file size) are left as provided by the
	 * caller; serialization starts at the marker field. */
	offset = ECRYPTFS_FILE_SIZE_BYTES;
	write_ecryptfs_marker((page_virt + offset), &written);
	offset += written;
	ecryptfs_write_crypt_stat_flags((page_virt + offset), crypt_stat,
					&written);
	offset += written;
	ecryptfs_write_header_metadata((page_virt + offset), crypt_stat,
				       &written);
	offset += written;
	/* The key packet set is the only step here that can fail */
	rc = ecryptfs_generate_key_packet_set((page_virt + offset), crypt_stat,
					      ecryptfs_dentry, &written,
					      max - offset);
	if (rc)
		ecryptfs_printk(KERN_WARNING, "Error generating key packet "
				"set; rc = [%d]\n", rc);
	if (size) {
		offset += written;
		*size = offset;
	}
	return rc;
}
/* Writes @virt_len header bytes to offset 0 of the lower file.
 * Returns 0 on success or the negative error from the lower write. */
static int
ecryptfs_write_metadata_to_contents(struct inode *ecryptfs_inode,
				    char *virt, size_t virt_len)
{
	int rc = ecryptfs_write_lower(ecryptfs_inode, virt, 0, virt_len);

	if (rc < 0) {
		printk(KERN_ERR "%s: Error attempting to write header "
		       "information to lower file; rc = [%d]\n", __func__, rc);
		return rc;
	}
	return 0;
}
/* Stores @size bytes of header data in the eCryptfs xattr of the
 * lower file; returns whatever ecryptfs_setxattr() returns. */
static int
ecryptfs_write_metadata_to_xattr(struct dentry *ecryptfs_dentry,
				 char *page_virt, size_t size)
{
	return ecryptfs_setxattr(ecryptfs_dentry, ECRYPTFS_XATTR_NAME,
				 page_virt, size, 0);
}
/* Allocates 2^order zeroed pages; returns their kernel virtual
 * address, or 0 on allocation failure. */
static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask,
					       unsigned int order)
{
	struct page *page = alloc_pages(gfp_mask | __GFP_ZERO, order);

	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}
/**
* ecryptfs_write_metadata
* @ecryptfs_dentry: The eCryptfs dentry, which should be negative
* @ecryptfs_inode: The newly created eCryptfs inode
*
* Write the file headers out. This will likely involve a userspace
* callout, in which the session key is encrypted with one or more
* public keys and/or the passphrase necessary to do the encryption is
* retrieved via a prompt. Exactly what happens at this point should
* be policy-dependent.
*
* Returns zero on success; non-zero on error
*/
int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry,
			    struct inode *ecryptfs_inode)
{
	struct ecryptfs_crypt_stat *crypt_stat =
		&ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
	unsigned int order;
	char *virt;
	size_t virt_len;
	size_t size = 0;
	int rc = 0;

	/* Refuse to write headers unless the file is flagged encrypted
	 * and has a valid key */
	if (likely(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
		if (!(crypt_stat->flags & ECRYPTFS_KEY_VALID)) {
			printk(KERN_ERR "Key is invalid; bailing out\n");
			rc = -EINVAL;
			goto out;
		}
	} else {
		printk(KERN_WARNING "%s: Encrypted flag not set\n",
		       __func__);
		rc = -EINVAL;
		goto out;
	}
	virt_len = crypt_stat->metadata_size;
	order = get_order(virt_len);
	/* Released in this function */
	virt = (char *)ecryptfs_get_zeroed_pages(GFP_KERNEL, order);
	if (!virt) {
		printk(KERN_ERR "%s: Out of memory\n", __func__);
		rc = -ENOMEM;
		goto out;
	}
	/* Zeroed page ensures the in-header unencrypted i_size is set to 0 */
	rc = ecryptfs_write_headers_virt(virt, virt_len, &size, crypt_stat,
					 ecryptfs_dentry);
	if (unlikely(rc)) {
		printk(KERN_ERR "%s: Error whilst writing headers; rc = [%d]\n",
		       __func__, rc);
		goto out_free;
	}
	/* Destination depends on mount configuration: xattr or file body */
	if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
		rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt,
						      size);
	else
		rc = ecryptfs_write_metadata_to_contents(ecryptfs_inode, virt,
							 virt_len);
	if (rc) {
		printk(KERN_ERR "%s: Error writing metadata out to lower file; "
		       "rc = [%d]\n", __func__, rc);
		goto out_free;
	}
out_free:
	free_pages((unsigned long)virt, order);
out:
	return rc;
}
/* Controls whether parse_header_metadata() rejects too-small sizes */
#define ECRYPTFS_DONT_VALIDATE_HEADER_SIZE 0
#define ECRYPTFS_VALIDATE_HEADER_SIZE 1
/* Parses the header-extent size (be32) and the count of header extents
 * at the front of the file (be16) from @virt, setting
 * crypt_stat->metadata_size to their product and *bytes_read to 6.
 * NOTE(review): the u16 x u32 product is not checked against size_t
 * overflow on 32-bit builds, and both values come from disk — confirm
 * callers sanity-check metadata_size before trusting it. */
static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat,
				 char *virt, int *bytes_read,
				 int validate_header_size)
{
	int rc = 0;
	u32 header_extent_size;
	u16 num_header_extents_at_front;

	header_extent_size = get_unaligned_be32(virt);
	virt += sizeof(__be32);
	num_header_extents_at_front = get_unaligned_be16(virt);
	crypt_stat->metadata_size = (((size_t)num_header_extents_at_front
				      * (size_t)header_extent_size));
	(*bytes_read) = (sizeof(__be32) + sizeof(__be16));
	if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE)
	    && (crypt_stat->metadata_size
		< ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) {
		rc = -EINVAL;
		printk(KERN_WARNING "Invalid header size: [%zd]\n",
		       crypt_stat->metadata_size);
	}
	return rc;
}
/**
* set_default_header_data
* @crypt_stat: The cryptographic context
*
* For version 0 file format; this function is only for backwards
* compatibility for files created with the prior versions of
* eCryptfs.
*/
static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat)
{
	/* Version 0 files always used the minimum header extent size */
	crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
}
/* Initializes the eCryptfs inode's i_size: from the lower inode's size
 * when the encrypted view is enabled (plus metadata size if headers
 * live in an xattr), otherwise from the big-endian size field at the
 * start of @page_virt. Sets ECRYPTFS_I_SIZE_INITIALIZED when done. */
void ecryptfs_i_size_init(const char *page_virt, struct inode *inode)
{
	struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
	struct ecryptfs_crypt_stat *crypt_stat;
	u64 file_size;

	crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
	mount_crypt_stat =
		&ecryptfs_superblock_to_private(inode->i_sb)->mount_crypt_stat;
	if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
		file_size = i_size_read(ecryptfs_inode_to_lower(inode));
		/* In the encrypted view, xattr-resident headers count
		 * toward the reported size */
		if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
			file_size += crypt_stat->metadata_size;
	} else
		file_size = get_unaligned_be64(page_virt);
	i_size_write(inode, (loff_t)file_size);
	crypt_stat->flags |= ECRYPTFS_I_SIZE_INITIALIZED;
}
/**
* ecryptfs_read_headers_virt
* @page_virt: The virtual address into which to read the headers
* @crypt_stat: The cryptographic context
* @ecryptfs_dentry: The eCryptfs dentry
* @validate_header_size: Whether to validate the header size while reading
*
* Read/parse the header data. The header format is detailed in the
* comment block for the ecryptfs_write_headers_virt() function.
*
* Returns zero on success
*/
static int ecryptfs_read_headers_virt(char *page_virt,
				      struct ecryptfs_crypt_stat *crypt_stat,
				      struct dentry *ecryptfs_dentry,
				      int validate_header_size)
{
	int rc = 0;
	int offset;
	int bytes_read;

	ecryptfs_set_default_sizes(crypt_stat);
	crypt_stat->mount_crypt_stat = &ecryptfs_superblock_to_private(
		ecryptfs_dentry->d_sb)->mount_crypt_stat;
	/* The marker follows the 8-byte file-size field */
	offset = ECRYPTFS_FILE_SIZE_BYTES;
	rc = ecryptfs_validate_marker(page_virt + offset);
	if (rc)
		goto out;
	if (!(crypt_stat->flags & ECRYPTFS_I_SIZE_INITIALIZED))
		ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
	offset += MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
	rc = ecryptfs_process_flags(crypt_stat, (page_virt + offset),
				    &bytes_read);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error processing flags\n");
		goto out;
	}
	if (crypt_stat->file_version > ECRYPTFS_SUPPORTED_FILE_VERSION) {
		ecryptfs_printk(KERN_WARNING, "File version is [%d]; only "
				"file version [%d] is supported by this "
				"version of eCryptfs\n",
				crypt_stat->file_version,
				ECRYPTFS_SUPPORTED_FILE_VERSION);
		rc = -EINVAL;
		goto out;
	}
	offset += bytes_read;
	if (crypt_stat->file_version >= 1) {
		/* NOTE(review): a parse_header_metadata() failure only
		 * logs a warning; rc is then overwritten by
		 * ecryptfs_parse_packet_set() below — confirm this
		 * continue-on-error behavior is intended. */
		rc = parse_header_metadata(crypt_stat, (page_virt + offset),
					   &bytes_read, validate_header_size);
		if (rc) {
			ecryptfs_printk(KERN_WARNING, "Error reading header "
					"metadata; rc = [%d]\n", rc);
		}
		offset += bytes_read;
	} else
		set_default_header_data(crypt_stat);
	rc = ecryptfs_parse_packet_set(crypt_stat, (page_virt + offset),
				       ecryptfs_dentry);
out:
	return rc;
}
/**
* ecryptfs_read_xattr_region
* @page_virt: The virtual address into which to read the xattr data
* @ecryptfs_inode: The eCryptfs inode
*
* Attempts to read the crypto metadata from the extended attribute
* region of the lower file.
*
* Returns zero on success; non-zero on error
*/
int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode)
{
	struct dentry *lower_dentry =
		ecryptfs_inode_to_private(ecryptfs_inode)->lower_file->f_dentry;
	ssize_t size;

	size = ecryptfs_getxattr_lower(lower_dentry, ECRYPTFS_XATTR_NAME,
				       page_virt, ECRYPTFS_DEFAULT_EXTENT_SIZE);
	if (size >= 0)
		return 0;
	/* Missing or unreadable xattr: report only when verbose */
	if (unlikely(ecryptfs_verbosity > 0))
		printk(KERN_INFO "Error attempting to read the [%s] "
		       "xattr from the lower file; return value = "
		       "[%zd]\n", ECRYPTFS_XATTR_NAME, size);
	return -EINVAL;
}
/* Mirror of ecryptfs_read_and_validate_header_region() for metadata
 * stored in the lower file's xattr rather than its contents. */
int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry,
					    struct inode *inode)
{
	u8 file_size[ECRYPTFS_SIZE_AND_MARKER_BYTES];
	u8 *marker = file_size + ECRYPTFS_FILE_SIZE_BYTES;
	int rc;

	rc = ecryptfs_getxattr_lower(ecryptfs_dentry_to_lower(dentry),
				     ECRYPTFS_XATTR_NAME, file_size,
				     ECRYPTFS_SIZE_AND_MARKER_BYTES);
	if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES) {
		/* Short read means no valid metadata; propagate real errors */
		if (rc >= 0)
			return -EINVAL;
		return rc;
	}
	rc = ecryptfs_validate_marker(marker);
	if (rc)
		return rc;
	ecryptfs_i_size_init(file_size, inode);
	return 0;
}
/**
* ecryptfs_read_metadata
*
* Common entry point for reading file metadata. From here, we could
* retrieve the header information from the header region of the file,
* the xattr region of the file, or some other repository that is
* stored separately from the file itself. The current implementation
* supports retrieving the metadata information from the file contents
* and from the xattr region.
*
* Returns zero if valid headers found and parsed; non-zero otherwise
*/
int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
{
	int rc;
	char *page_virt;
	struct inode *ecryptfs_inode = ecryptfs_dentry->d_inode;
	struct ecryptfs_crypt_stat *crypt_stat =
		&ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
	struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
		&ecryptfs_superblock_to_private(
			ecryptfs_dentry->d_sb)->mount_crypt_stat;

	ecryptfs_copy_mount_wide_flags_to_inode_flags(crypt_stat,
						      mount_crypt_stat);
	/* Read the first page from the underlying file */
	page_virt = kmem_cache_alloc(ecryptfs_header_cache, GFP_USER);
	if (!page_virt) {
		rc = -ENOMEM;
		printk(KERN_ERR "%s: Unable to allocate page_virt\n",
		       __func__);
		goto out;
	}
	rc = ecryptfs_read_lower(page_virt, 0, crypt_stat->extent_size,
				 ecryptfs_inode);
	if (rc >= 0)
		rc = ecryptfs_read_headers_virt(page_virt, crypt_stat,
						ecryptfs_dentry,
						ECRYPTFS_VALIDATE_HEADER_SIZE);
	if (rc) {
		/* metadata is not in the file header, so try xattrs */
		memset(page_virt, 0, PAGE_CACHE_SIZE);
		rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode);
		if (rc) {
			printk(KERN_DEBUG "Valid eCryptfs headers not found in "
			       "file header region or xattr region, inode %lu\n",
				ecryptfs_inode->i_ino);
			rc = -EINVAL;
			goto out;
		}
		/* Header-size validation is skipped on the xattr path */
		rc = ecryptfs_read_headers_virt(page_virt, crypt_stat,
						ecryptfs_dentry,
						ECRYPTFS_DONT_VALIDATE_HEADER_SIZE);
		if (rc) {
			printk(KERN_DEBUG "Valid eCryptfs headers not found in "
			       "file xattr region either, inode %lu\n",
				ecryptfs_inode->i_ino);
			rc = -EINVAL;
		}
		/* Metadata found via xattr is only usable when the mount
		 * enabled xattr metadata support */
		if (crypt_stat->mount_crypt_stat->flags
		    & ECRYPTFS_XATTR_METADATA_ENABLED) {
			crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
		} else {
			printk(KERN_WARNING "Attempt to access file with "
			       "crypto metadata only in the extended attribute "
			       "region, but eCryptfs was mounted without "
			       "xattr support enabled. eCryptfs will not treat "
			       "this like an encrypted file, inode %lu\n",
				ecryptfs_inode->i_ino);
			rc = -EINVAL;
		}
	}
out:
	/* Scrub the header buffer (it may hold key material) before free */
	if (page_virt) {
		memset(page_virt, 0, PAGE_CACHE_SIZE);
		kmem_cache_free(ecryptfs_header_cache, page_virt);
	}
	return rc;
}
/**
* ecryptfs_encrypt_filename - encrypt filename
*
* CBC-encrypts the filename. We do not want to encrypt the same
* filename with the same key and IV, which may happen with hard
* links, so we prepend random bits to each filename.
*
* Returns zero on success; non-zero otherwise
*/
/* Encrypts @filename->filename into a freshly kmalloc'ed buffer at
 * @filename->encrypted_filename using a tag 70 packet. Only the
 * mount-wide FNEK scheme is supported; anything else is -EOPNOTSUPP.
 * On any failure, encrypted_filename is left NULL with size 0. */
static int
ecryptfs_encrypt_filename(struct ecryptfs_filename *filename,
			  struct ecryptfs_crypt_stat *crypt_stat,
			  struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
	int rc = 0;

	filename->encrypted_filename = NULL;
	filename->encrypted_filename_size = 0;
	if ((crypt_stat && (crypt_stat->flags & ECRYPTFS_ENCFN_USE_MOUNT_FNEK))
	    || (mount_crypt_stat && (mount_crypt_stat->flags
				     & ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK))) {
		size_t packet_size;
		size_t remaining_bytes;

		/* First call with NULL dest just computes the packet size */
		rc = ecryptfs_write_tag_70_packet(
			NULL, NULL,
			&filename->encrypted_filename_size,
			mount_crypt_stat, NULL,
			filename->filename_size);
		if (rc) {
			/* Was "tag 72" — this function builds tag 70 packets */
			printk(KERN_ERR "%s: Error attempting to get packet "
			       "size for tag 70; rc = [%d]\n", __func__,
			       rc);
			filename->encrypted_filename_size = 0;
			goto out;
		}
		filename->encrypted_filename =
			kmalloc(filename->encrypted_filename_size, GFP_KERNEL);
		if (!filename->encrypted_filename) {
			printk(KERN_ERR "%s: Out of memory whilst attempting "
			       "to kmalloc [%zd] bytes\n", __func__,
			       filename->encrypted_filename_size);
			rc = -ENOMEM;
			goto out;
		}
		remaining_bytes = filename->encrypted_filename_size;
		rc = ecryptfs_write_tag_70_packet(filename->encrypted_filename,
						  &remaining_bytes,
						  &packet_size,
						  mount_crypt_stat,
						  filename->filename,
						  filename->filename_size);
		if (rc) {
			printk(KERN_ERR "%s: Error attempting to generate "
			       "tag 70 packet; rc = [%d]\n", __func__,
			       rc);
			kfree(filename->encrypted_filename);
			filename->encrypted_filename = NULL;
			filename->encrypted_filename_size = 0;
			goto out;
		}
		filename->encrypted_filename_size = packet_size;
	} else {
		printk(KERN_ERR "%s: No support for requested filename "
		       "encryption method in this release\n", __func__);
		rc = -EOPNOTSUPP;
		goto out;
	}
out:
	return rc;
}
/* Duplicates @name (of @name_size bytes) into a kmalloc'ed,
 * NUL-terminated buffer at *copied_name. Returns 0 or -ENOMEM. */
static int ecryptfs_copy_filename(char **copied_name, size_t *copied_name_size,
				  const char *name, size_t name_size)
{
	*copied_name = kmalloc(name_size + 1, GFP_KERNEL);
	if (!*copied_name)
		return -ENOMEM;
	memcpy(*copied_name, name, name_size);
	/* Terminator is purely a convenience for debug printing; the
	 * reported size excludes it */
	(*copied_name)[name_size] = '\0';
	*copied_name_size = name_size;
	return 0;
}
/**
* ecryptfs_process_key_cipher - Perform key cipher initialization.
* @key_tfm: Crypto context for key material, set by this function
* @cipher_name: Name of the cipher
* @key_size: Size of the key in bytes
*
* Returns zero on success. Any crypto_tfm structs allocated here
* should be released by other functions, such as on a superblock put
* event, regardless of whether this function succeeds or fails.
*/
static int
ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
			    char *cipher_name, size_t *key_size)
{
	char dummy_key[ECRYPTFS_MAX_KEY_BYTES];
	char *full_alg_name = NULL;
	int rc;

	*key_tfm = NULL;
	if (*key_size > ECRYPTFS_MAX_KEY_BYTES) {
		rc = -EINVAL;
		printk(KERN_ERR "Requested key size is [%zd] bytes; maximum "
		      "allowable is [%d]\n", *key_size, ECRYPTFS_MAX_KEY_BYTES);
		goto out;
	}
	/* Key material is processed in ECB mode (no chaining needed) */
	rc = ecryptfs_crypto_api_algify_cipher_name(&full_alg_name, cipher_name,
						    "ecb");
	if (rc)
		goto out;
	*key_tfm = crypto_alloc_blkcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(*key_tfm)) {
		rc = PTR_ERR(*key_tfm);
		printk(KERN_ERR "Unable to allocate crypto cipher with name "
		       "[%s]; rc = [%d]\n", full_alg_name, rc);
		goto out;
	}
	crypto_blkcipher_set_flags(*key_tfm, CRYPTO_TFM_REQ_WEAK_KEY);
	/* A zero requested size means "use the cipher's maximum" */
	if (*key_size == 0) {
		struct blkcipher_alg *alg = crypto_blkcipher_alg(*key_tfm);

		*key_size = alg->max_keysize;
	}
	/* Set a throwaway random key just to validate the key size now */
	get_random_bytes(dummy_key, *key_size);
	rc = crypto_blkcipher_setkey(*key_tfm, dummy_key, *key_size);
	if (rc) {
		printk(KERN_ERR "Error attempting to set key of size [%zd] for "
		       "cipher [%s]; rc = [%d]\n", *key_size, full_alg_name,
		       rc);
		rc = -EINVAL;
		goto out;
	}
out:
	kfree(full_alg_name);
	return rc;
}
/* Slab cache for struct ecryptfs_key_tfm */
struct kmem_cache *ecryptfs_key_tfm_cache;
/* Cache of key tfms, one per cipher name; guarded by key_tfm_list_mutex */
static struct list_head key_tfm_list;
struct mutex key_tfm_list_mutex;
/* Module-init hook: sets up the (initially empty) key tfm cache */
int __init ecryptfs_init_crypto(void)
{
	mutex_init(&key_tfm_list_mutex);
	INIT_LIST_HEAD(&key_tfm_list);
	return 0;
}
/**
* ecryptfs_destroy_crypto - free all cached key_tfms on key_tfm_list
*
* Called only at module unload time
*/
int ecryptfs_destroy_crypto(void)
{
	struct ecryptfs_key_tfm *key_tfm, *key_tfm_tmp;

	mutex_lock(&key_tfm_list_mutex);
	/* _safe variant: each entry is unlinked and freed as we walk */
	list_for_each_entry_safe(key_tfm, key_tfm_tmp, &key_tfm_list,
				 key_tfm_list) {
		list_del(&key_tfm->key_tfm_list);
		if (key_tfm->key_tfm)
			crypto_free_blkcipher(key_tfm->key_tfm);
		kmem_cache_free(ecryptfs_key_tfm_cache, key_tfm);
	}
	mutex_unlock(&key_tfm_list_mutex);
	return 0;
}
/* Allocates and initializes a key tfm for @cipher_name (key_size of 0
 * selects the cipher's maximum) and adds it to key_tfm_list. Caller
 * must hold key_tfm_list_mutex (enforced by the BUG_ON). On success,
 * *key_tfm (if non-NULL) points at the new entry; on failure it is
 * set to NULL. */
int
ecryptfs_add_new_key_tfm(struct ecryptfs_key_tfm **key_tfm, char *cipher_name,
			 size_t key_size)
{
	struct ecryptfs_key_tfm *tmp_tfm;
	int rc = 0;

	BUG_ON(!mutex_is_locked(&key_tfm_list_mutex));
	tmp_tfm = kmem_cache_alloc(ecryptfs_key_tfm_cache, GFP_KERNEL);
	if (key_tfm != NULL)
		(*key_tfm) = tmp_tfm;
	if (!tmp_tfm) {
		rc = -ENOMEM;
		printk(KERN_ERR "Error attempting to allocate from "
		       "ecryptfs_key_tfm_cache\n");
		goto out;
	}
	mutex_init(&tmp_tfm->key_tfm_mutex);
	/* NOTE(review): explicit termination relies on cipher_name[] being
	 * declared with ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1 bytes — confirm
	 * against the struct definition. */
	strncpy(tmp_tfm->cipher_name, cipher_name,
		ECRYPTFS_MAX_CIPHER_NAME_SIZE);
	tmp_tfm->cipher_name[ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0';
	tmp_tfm->key_size = key_size;
	rc = ecryptfs_process_key_cipher(&tmp_tfm->key_tfm,
					 tmp_tfm->cipher_name,
					 &tmp_tfm->key_size);
	if (rc) {
		printk(KERN_ERR "Error attempting to initialize key TFM "
		       "cipher with name = [%s]; rc = [%d]\n",
		       tmp_tfm->cipher_name, rc);
		kmem_cache_free(ecryptfs_key_tfm_cache, tmp_tfm);
		if (key_tfm != NULL)
			(*key_tfm) = NULL;
		goto out;
	}
	list_add(&tmp_tfm->key_tfm_list, &key_tfm_list);
out:
	return rc;
}
/**
* ecryptfs_tfm_exists - Search for existing tfm for cipher_name.
* @cipher_name: the name of the cipher to search for
* @key_tfm: set to corresponding tfm if found
*
* Searches for cached key_tfm matching @cipher_name
* Must be called with &key_tfm_list_mutex held
* Returns 1 if found, with @key_tfm set
* Returns 0 if not found, with @key_tfm set to NULL
*/
int ecryptfs_tfm_exists(char *cipher_name, struct ecryptfs_key_tfm **key_tfm)
{
	struct ecryptfs_key_tfm *tmp_key_tfm;

	/* The list may be mutated concurrently; caller must hold the lock */
	BUG_ON(!mutex_is_locked(&key_tfm_list_mutex));
	list_for_each_entry(tmp_key_tfm, &key_tfm_list, key_tfm_list) {
		if (strcmp(tmp_key_tfm->cipher_name, cipher_name) == 0) {
			if (key_tfm)
				(*key_tfm) = tmp_key_tfm;
			return 1;
		}
	}
	if (key_tfm)
		(*key_tfm) = NULL;
	return 0;
}
/**
* ecryptfs_get_tfm_and_mutex_for_cipher_name
*
* @tfm: set to cached tfm found, or new tfm created
* @tfm_mutex: set to mutex for cached tfm found, or new tfm created
* @cipher_name: the name of the cipher to search for and/or add
*
* Sets pointers to @tfm & @tfm_mutex matching @cipher_name.
* Searches for cached item first, and creates new if not found.
* Returns 0 on success, non-zero if adding new cipher failed
*/
int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm,
					       struct mutex **tfm_mutex,
					       char *cipher_name)
{
	struct ecryptfs_key_tfm *key_tfm;
	int rc = 0;

	(*tfm) = NULL;
	(*tfm_mutex) = NULL;
	mutex_lock(&key_tfm_list_mutex);
	/* Create-on-miss: key size 0 selects the cipher's maximum */
	if (!ecryptfs_tfm_exists(cipher_name, &key_tfm)) {
		rc = ecryptfs_add_new_key_tfm(&key_tfm, cipher_name, 0);
		if (rc) {
			printk(KERN_ERR "Error adding new key_tfm to list; "
			       "rc = [%d]\n", rc);
			goto out;
		}
	}
	(*tfm) = key_tfm->key_tfm;
	(*tfm_mutex) = &key_tfm->key_tfm_mutex;
out:
	mutex_unlock(&key_tfm_list_mutex);
	return rc;
}
/* 64 characters forming a 6-bit target field; all are safe in
 * filenames on common filesystems. Read-only lookup table —
 * NOTE(review): could be declared const. */
static unsigned char *portable_filename_chars = ("-.0123456789ABCD"
						 "EFGHIJKLMNOPQRST"
						 "UVWXYZabcdefghij"
						 "klmnopqrstuvwxyz");
/* We could either offset on every reverse map or just pad some 0x00's
 * at the front here */
/* Inverse of portable_filename_chars: maps an encoded byte back to its
 * 6-bit value; bytes outside the alphabet map to 0x00. */
static const unsigned char filename_rev_map[256] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 31 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 39 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* 47 */
	0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, /* 55 */
	0x0A, 0x0B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 63 */
	0x00, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, /* 71 */
	0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, /* 79 */
	0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, /* 87 */
	0x23, 0x24, 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, /* 95 */
	0x00, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, /* 103 */
	0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, /* 111 */
	0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, /* 119 */
	0x3D, 0x3E, 0x3F /* 123 - 255 initialized to 0x00 */
};
/**
* ecryptfs_encode_for_filename
* @dst: Destination location for encoded filename
* @dst_size: Size of the encoded filename in bytes
* @src: Source location for the filename to encode
* @src_size: Size of the source in bytes
*/
/* Base64-style encoding: every 3 source bytes become 4 characters from
 * portable_filename_chars. A partial final block is zero-padded. When
 * @dst is NULL, only *dst_size is computed (sizing pass). */
void ecryptfs_encode_for_filename(unsigned char *dst, size_t *dst_size,
				  unsigned char *src, size_t src_size)
{
	size_t num_blocks;
	size_t block_num = 0;
	size_t dst_offset = 0;
	unsigned char last_block[3];

	if (src_size == 0) {
		(*dst_size) = 0;
		goto out;
	}
	num_blocks = (src_size / 3);
	if ((src_size % 3) == 0) {
		memcpy(last_block, (&src[src_size - 3]), 3);
	} else {
		/* Partial trailing block: pad missing bytes with 0x00 */
		num_blocks++;
		last_block[2] = 0x00;
		switch (src_size % 3) {
		case 1:
			last_block[0] = src[src_size - 1];
			last_block[1] = 0x00;
			break;
		case 2:
			last_block[0] = src[src_size - 2];
			last_block[1] = src[src_size - 1];
		}
	}
	(*dst_size) = (num_blocks * 4);
	if (!dst)
		goto out;
	while (block_num < num_blocks) {
		unsigned char *src_block;
		unsigned char dst_block[4];

		if (block_num == (num_blocks - 1))
			src_block = last_block;
		else
			src_block = &src[block_num * 3];
		/* Split 24 source bits into four 6-bit indices */
		dst_block[0] = ((src_block[0] >> 2) & 0x3F);
		dst_block[1] = (((src_block[0] << 4) & 0x30)
				| ((src_block[1] >> 4) & 0x0F));
		dst_block[2] = (((src_block[1] << 2) & 0x3C)
				| ((src_block[2] >> 6) & 0x03));
		dst_block[3] = (src_block[2] & 0x3F);
		dst[dst_offset++] = portable_filename_chars[dst_block[0]];
		dst[dst_offset++] = portable_filename_chars[dst_block[1]];
		dst[dst_offset++] = portable_filename_chars[dst_block[2]];
		dst[dst_offset++] = portable_filename_chars[dst_block[3]];
		block_num++;
	}
out:
	return;
}
static size_t ecryptfs_max_decoded_size(size_t encoded_size)
{
	/* Conservative upper bound on decoded length: every block of 4
	 * encoded characters decodes to at most 3 bytes, and the +1
	 * keeps the bound safe when encoded_size is not a multiple of
	 * 4. Callers use this to size the @dst buffer for a subsequent
	 * decode call. */
	size_t padded = encoded_size + 1;

	return (padded * 3) / 4;
}
/**
* ecryptfs_decode_from_filename
* @dst: If NULL, this function only sets @dst_size and returns. If
* non-NULL, this function decodes the encoded octets in @src
* into the memory that @dst points to.
* @dst_size: Set to the size of the decoded string.
* @src: The encoded set of octets to decode.
* @src_size: The size of the encoded set of octets to decode.
*/
static void
ecryptfs_decode_from_filename(unsigned char *dst, size_t *dst_size,
			      const unsigned char *src, size_t src_size)
{
	/* Bit position within the current output byte where the next
	 * 6-bit value will be spliced in */
	u8 current_bit_offset = 0;
	size_t src_byte_offset = 0;
	size_t dst_byte_offset = 0;

	if (dst == NULL) {
		/* Sizing pass only */
		(*dst_size) = ecryptfs_max_decoded_size(src_size);
		goto out;
	}
	while (src_byte_offset < src_size) {
		/* Map the encoded character back to its 6-bit value */
		unsigned char src_byte =
			filename_rev_map[(int)src[src_byte_offset]];

		/* Pack consecutive 6-bit values into 8-bit output bytes;
		 * the offset cycles 0 -> 6 -> 4 -> 2 -> 0 every 4 inputs
		 * (3 output bytes). */
		switch (current_bit_offset) {
		case 0:
			dst[dst_byte_offset] = (src_byte << 2);
			current_bit_offset = 6;
			break;
		case 6:
			dst[dst_byte_offset++] |= (src_byte >> 4);
			dst[dst_byte_offset] = ((src_byte & 0xF)
						 << 4);
			current_bit_offset = 4;
			break;
		case 4:
			dst[dst_byte_offset++] |= (src_byte >> 2);
			dst[dst_byte_offset] = (src_byte << 6);
			current_bit_offset = 2;
			break;
		case 2:
			dst[dst_byte_offset++] |= (src_byte);
			current_bit_offset = 0;
			break;
		}
		src_byte_offset++;
	}
	(*dst_size) = dst_byte_offset;
out:
	return;
}
/**
* ecryptfs_encrypt_and_encode_filename - converts a plaintext file name to cipher text
* @crypt_stat: The crypt_stat struct associated with the file name to encode
* @name: The plaintext name
* @length: The length of the plaintext
* @encoded_name: The encypted name
*
* Encrypts and encodes a filename into something that constitutes a
* valid filename for a filesystem, with printable characters.
*
* We assume that we have a properly initialized crypto context,
* pointed to by crypt_stat->tfm.
*
* Returns zero on success; non-zero otherwise
*/
int ecryptfs_encrypt_and_encode_filename(
	char **encoded_name,
	size_t *encoded_name_size,
	struct ecryptfs_crypt_stat *crypt_stat,
	struct ecryptfs_mount_crypt_stat *mount_crypt_stat,
	const char *name, size_t name_size)
{
	size_t encoded_name_no_prefix_size;
	int rc = 0;

	(*encoded_name) = NULL;
	(*encoded_name_size) = 0;
	/* If filename encryption is not enabled (per-file or mount-wide),
	 * fall through to a plain copy at the bottom. */
	if ((crypt_stat && (crypt_stat->flags & ECRYPTFS_ENCRYPT_FILENAMES))
	    || (mount_crypt_stat && (mount_crypt_stat->flags
				     & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES))) {
		struct ecryptfs_filename *filename;

		filename = kzalloc(sizeof(*filename), GFP_KERNEL);
		if (!filename) {
			printk(KERN_ERR "%s: Out of memory whilst attempting "
			       "to kzalloc [%zd] bytes\n", __func__,
			       sizeof(*filename));
			rc = -ENOMEM;
			goto out;
		}
		filename->filename = (char *)name;
		filename->filename_size = name_size;
		rc = ecryptfs_encrypt_filename(filename, crypt_stat,
					       mount_crypt_stat);
		if (rc) {
			printk(KERN_ERR "%s: Error attempting to encrypt "
			       "filename; rc = [%d]\n", __func__, rc);
			kfree(filename);
			goto out;
		}
		/* Sizing pass (dst == NULL) to compute the encoded length */
		ecryptfs_encode_for_filename(
			NULL, &encoded_name_no_prefix_size,
			filename->encrypted_filename,
			filename->encrypted_filename_size);
		/* Encoded names carry a scheme-identifying prefix */
		if ((crypt_stat && (crypt_stat->flags
				    & ECRYPTFS_ENCFN_USE_MOUNT_FNEK))
		    || (mount_crypt_stat
			&& (mount_crypt_stat->flags
			    & ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK)))
			(*encoded_name_size) =
				(ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE
				 + encoded_name_no_prefix_size);
		else
			(*encoded_name_size) =
				(ECRYPTFS_FEK_ENCRYPTED_FILENAME_PREFIX_SIZE
				 + encoded_name_no_prefix_size);
		(*encoded_name) = kmalloc((*encoded_name_size) + 1, GFP_KERNEL);
		if (!(*encoded_name)) {
			printk(KERN_ERR "%s: Out of memory whilst attempting "
			       "to kzalloc [%zd] bytes\n", __func__,
			       (*encoded_name_size));
			rc = -ENOMEM;
			kfree(filename->encrypted_filename);
			kfree(filename);
			goto out;
		}
		if ((crypt_stat && (crypt_stat->flags
				    & ECRYPTFS_ENCFN_USE_MOUNT_FNEK))
		    || (mount_crypt_stat
			&& (mount_crypt_stat->flags
			    & ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK))) {
			memcpy((*encoded_name),
			       ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX,
			       ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE);
			ecryptfs_encode_for_filename(
			    ((*encoded_name)
			     + ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE),
			    &encoded_name_no_prefix_size,
			    filename->encrypted_filename,
			    filename->encrypted_filename_size);
			(*encoded_name_size) =
				(ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE
				 + encoded_name_no_prefix_size);
			(*encoded_name)[(*encoded_name_size)] = '\0';
		} else {
			/* Only the mount-wide FNEK scheme is implemented */
			rc = -EOPNOTSUPP;
		}
		if (rc) {
			printk(KERN_ERR "%s: Error attempting to encode "
			       "encrypted filename; rc = [%d]\n", __func__,
			       rc);
			kfree((*encoded_name));
			(*encoded_name) = NULL;
			(*encoded_name_size) = 0;
		}
		kfree(filename->encrypted_filename);
		kfree(filename);
	} else {
		rc = ecryptfs_copy_filename(encoded_name,
					    encoded_name_size,
					    name, name_size);
	}
out:
	return rc;
}
/**
* ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext
* @plaintext_name: The plaintext name
* @plaintext_name_size: The plaintext name size
* @ecryptfs_dir_dentry: eCryptfs directory dentry
* @name: The filename in cipher text
* @name_size: The cipher text name size
*
* Decrypts and decodes the filename.
*
* Returns zero on success; non-zero otherwise
*/
int ecryptfs_decode_and_decrypt_filename(char **plaintext_name,
					 size_t *plaintext_name_size,
					 struct dentry *ecryptfs_dir_dentry,
					 const char *name, size_t name_size)
{
	struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
		&ecryptfs_superblock_to_private(
			ecryptfs_dir_dentry->d_sb)->mount_crypt_stat;
	char *decoded_name;
	size_t decoded_name_size;
	size_t packet_size;
	int rc = 0;

	/* Only attempt decryption when filename encryption is enabled on
	 * the mount, we are not in the encrypted view, and the name
	 * carries the FNEK prefix; otherwise copy it through as-is. */
	if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
	    && !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
	    && (name_size > ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE)
	    && (strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX,
			ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) == 0)) {
		const char *orig_name = name;
		size_t orig_name_size = name_size;

		/* Skip the prefix before decoding */
		name += ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE;
		name_size -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE;
		/* Sizing pass (dst == NULL), then the real decode */
		ecryptfs_decode_from_filename(NULL, &decoded_name_size,
					      name, name_size);
		decoded_name = kmalloc(decoded_name_size, GFP_KERNEL);
		if (!decoded_name) {
			printk(KERN_ERR "%s: Out of memory whilst attempting "
			       "to kmalloc [%zd] bytes\n", __func__,
			       decoded_name_size);
			rc = -ENOMEM;
			goto out;
		}
		ecryptfs_decode_from_filename(decoded_name, &decoded_name_size,
					      name, name_size);
		rc = ecryptfs_parse_tag_70_packet(plaintext_name,
						  plaintext_name_size,
						  &packet_size,
						  mount_crypt_stat,
						  decoded_name,
						  decoded_name_size);
		if (rc) {
			/* Best effort: an unparsable name is passed through
			 * unchanged rather than failing the lookup */
			printk(KERN_INFO "%s: Could not parse tag 70 packet "
			       "from filename; copying through filename "
			       "as-is\n", __func__);
			rc = ecryptfs_copy_filename(plaintext_name,
						    plaintext_name_size,
						    orig_name, orig_name_size);
			goto out_free;
		}
	} else {
		rc = ecryptfs_copy_filename(plaintext_name,
					    plaintext_name_size,
					    name, name_size);
		goto out;
	}
out_free:
	kfree(decoded_name);
out:
	return rc;
}
/* Exact maximum encrypted-name length for NAME_MAX input with an 8- or
 * 16-byte cipher block */
#define ENC_NAME_MAX_BLOCKLEN_8_OR_16 143
/* Computes the usable filename length (*namelen) to report for statfs,
 * given the lower filesystem's limit and the mount's filename cipher.
 * Returns 0, or the error from tfm lookup (with *namelen set to 0). */
int ecryptfs_set_f_namelen(long *namelen, long lower_namelen,
			   struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
	struct blkcipher_desc desc;
	struct mutex *tfm_mutex;
	size_t cipher_blocksize;
	int rc;

	/* No filename encryption: the lower limit applies directly */
	if (!(mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)) {
		(*namelen) = lower_namelen;
		return 0;
	}
	rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&desc.tfm, &tfm_mutex,
			mount_crypt_stat->global_default_fn_cipher_name);
	if (unlikely(rc)) {
		(*namelen) = 0;
		return rc;
	}
	mutex_lock(tfm_mutex);
	cipher_blocksize = crypto_blkcipher_blocksize(desc.tfm);
	mutex_unlock(tfm_mutex);
	/* Return an exact amount for the common cases */
	if (lower_namelen == NAME_MAX
	    && (cipher_blocksize == 8 || cipher_blocksize == 16)) {
		(*namelen) = ENC_NAME_MAX_BLOCKLEN_8_OR_16;
		return 0;
	}
	/* Return a safe estimate for the uncommon cases */
	(*namelen) = lower_namelen;
	(*namelen) -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE;
	/* Since this is the max decoded size, subtract 1 "decoded block" len */
	(*namelen) = ecryptfs_max_decoded_size(*namelen) - 3;
	(*namelen) -= ECRYPTFS_TAG_70_MAX_METADATA_SIZE;
	(*namelen) -= ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES;
	/* Worst case is that the filename is padded nearly a full block size */
	(*namelen) -= cipher_blocksize - 1;
	if ((*namelen) < 0)
		(*namelen) = 0;
	return 0;
}
| gpl-2.0 |
Split-Screen/android_kernel_oneplus_msm8996 | drivers/gpu/drm/radeon/radeon_prime.c | 1626 | 3587 | /*
* Copyright 2012 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* based on nouveau_prime.c
*
* Authors: Alex Deucher
*/
#include <drm/drmP.h>
#include "radeon.h"
#include <drm/radeon_drm.h>
#include <linux/dma-buf.h>
/* Build a scatter/gather table covering every backing page of the BO. */
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);

	return drm_prime_pages_to_sg(rbo->tbo.ttm->pages, rbo->tbo.num_pages);
}
/* Map the entire BO into kernel virtual address space for dma-buf vmap.
 * Returns the mapping's virtual address, or an ERR_PTR on kmap failure. */
void *radeon_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	int r = ttm_bo_kmap(&rbo->tbo, 0, rbo->tbo.num_pages,
			    &rbo->dma_buf_vmap);

	if (r)
		return ERR_PTR(r);

	return rbo->dma_buf_vmap.virtual;
}
void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
struct radeon_bo *bo = gem_to_radeon_bo(obj);
ttm_bo_kunmap(&bo->dma_buf_vmap);
}
/* Import a foreign dma-buf's sg table as a GTT-domain radeon BO.
 * Returns the new GEM object, or an ERR_PTR on allocation failure. */
struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
							struct dma_buf_attachment *attach,
							struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *bo;
	int ret;
	/* Create the BO against the exporter's reservation object so both
	 * sides serialize on the same lock.  NOTE(review): the ww_mutex is
	 * presumably held here because radeon_bo_create() uses resv without
	 * acquiring it itself — confirm against radeon_bo_create(). */
	ww_mutex_lock(&resv->lock, NULL);
	ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
			       RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
	ww_mutex_unlock(&resv->lock);
	if (ret)
		return ERR_PTR(ret);
	/* Track the imported BO on the device's global object list. */
	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&bo->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);
	return &bo->gem_base;
}
/* Pin the BO into the GTT domain so an importer sees a stable backing.
 * Returns 0 on success or a negative errno from reserve/pin. */
int radeon_gem_prime_pin(struct drm_gem_object *obj)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	int r;

	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;

	/* pin buffer into GTT */
	r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_GTT, NULL);
	radeon_bo_unreserve(rbo);

	return r;
}
/* Release the GTT pin taken by radeon_gem_prime_pin().  If the BO cannot
 * be reserved there is nothing safe to do, so bail silently. */
void radeon_gem_prime_unpin(struct drm_gem_object *obj)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);

	if (unlikely(radeon_bo_reserve(rbo, false) != 0))
		return;

	radeon_bo_unpin(rbo);
	radeon_bo_unreserve(rbo);
}
struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj)
{
struct radeon_bo *bo = gem_to_radeon_bo(obj);
return bo->tbo.resv;
}
/* Export a GEM object as a dma-buf.  BOs backed by userptr pages must not
 * be shared with other devices, so those are refused with -EPERM. */
struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(gobj);

	if (radeon_ttm_tt_has_userptr(rbo->tbo.ttm))
		return ERR_PTR(-EPERM);

	return drm_gem_prime_export(dev, gobj, flags);
}
| gpl-2.0 |
dwengen/linux | arch/powerpc/platforms/85xx/mpc8536_ds.c | 1626 | 2021 | /*
* MPC8536 DS Board Setup
*
* Copyright 2008 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/swiotlb.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include "mpc85xx.h"
/* Allocate and initialize the board's MPIC interrupt controller. */
void __init mpc8536_ds_pic_init(void)
{
	struct mpic *mpic;

	mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, 0, 256, " OpenPIC  ");
	BUG_ON(mpic == NULL);
	mpic_init(mpic);
}
/*
* Setup the architecture
*/
/*
 * Board setup: wire up PCI controllers and detect whether bounce
 * buffering (swiotlb) is needed for >4G memory configurations.
 */
static void __init mpc8536_ds_setup_arch(void)
{
	if (ppc_md.progress)
		ppc_md.progress("mpc8536_ds_setup_arch()", 0);

	fsl_pci_assign_primary();
	swiotlb_detect_4g();

	/* Fix: the original printk carried no log level, so the message was
	 * emitted at the configured default severity; pr_info pins it. */
	pr_info("MPC8536 DS board from Freescale Semiconductor\n");
}
/* At arch-initcall time (only when this machine matched): publish the
 * common mpc85xx platform devices and register the swiotlb bus notifier. */
machine_arch_initcall(mpc8536_ds, mpc85xx_common_publish_devices);
machine_arch_initcall(mpc8536_ds, swiotlb_setup_bus_notifier);
/*
* Called very early, device-tree isn't unflattened
*/
/* Early board probe (device tree still flat): match the root node's
 * compatible string against this board.  Non-zero means "this is us". */
static int __init mpc8536_ds_probe(void)
{
	return of_flat_dt_is_compatible(of_get_flat_dt_root(),
					"fsl,mpc8536ds");
}
/* Machine description: binds the probe/setup/IRQ hooks above into the
 * generic powerpc machine framework. */
define_machine(mpc8536_ds) {
	.name			= "MPC8536 DS",
	.probe			= mpc8536_ds_probe,
	.setup_arch		= mpc8536_ds_setup_arch,
	.init_IRQ		= mpc8536_ds_pic_init,
#ifdef CONFIG_PCI
	.pcibios_fixup_bus	= fsl_pcibios_fixup_bus,
	.pcibios_fixup_phb	= fsl_pcibios_fixup_phb,
#endif
	.get_irq		= mpic_get_irq,
	.restart		= fsl_rstcr_restart,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= udbg_progress,
};
| gpl-2.0 |
dj-sand/android_kernel_htc_flounder | drivers/acpi/acpica/exmisc.c | 2138 | 20204 | /******************************************************************************
*
* Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"
#include "amlcode.h"
#include "amlresrc.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exmisc")
/*******************************************************************************
*
* FUNCTION: acpi_ex_get_object_reference
*
* PARAMETERS: obj_desc - Create a reference to this object
* return_desc - Where to store the reference
* walk_state - Current state
*
* RETURN: Status
*
* DESCRIPTION: Obtain and return a "reference" to the target object
* Common code for the ref_of_op and the cond_ref_of_op.
*
******************************************************************************/
acpi_status
acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
			     union acpi_operand_object **return_desc,
			     struct acpi_walk_state *walk_state)
{
	union acpi_operand_object *reference_obj;
	union acpi_operand_object *referenced_obj;

	ACPI_FUNCTION_TRACE_PTR(ex_get_object_reference, obj_desc);

	*return_desc = NULL;

	/* What gets referenced depends on how obj_desc was created. */
	switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
	case ACPI_DESC_TYPE_OPERAND:

		/* An operand object must already be a local reference. */
		if (obj_desc->common.type != ACPI_TYPE_LOCAL_REFERENCE) {
			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
		}
		/*
		 * Must be a reference to a Local or Arg
		 */
		switch (obj_desc->reference.class) {
		case ACPI_REFCLASS_LOCAL:
		case ACPI_REFCLASS_ARG:
		case ACPI_REFCLASS_DEBUG:

			/* The referenced object is the pseudo-node for the local/arg */
			referenced_obj = obj_desc->reference.object;
			break;

		default:

			ACPI_ERROR((AE_INFO, "Unknown Reference Class 0x%2.2X",
				    obj_desc->reference.class));
			return_ACPI_STATUS(AE_AML_INTERNAL);
		}
		break;

	case ACPI_DESC_TYPE_NAMED:
		/*
		 * A named reference that has already been resolved to a Node
		 */
		referenced_obj = obj_desc;
		break;

	default:

		ACPI_ERROR((AE_INFO, "Invalid descriptor type 0x%X",
			    ACPI_GET_DESCRIPTOR_TYPE(obj_desc)));
		return_ACPI_STATUS(AE_TYPE);
	}

	/* Create a new reference object */

	reference_obj =
	    acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_REFERENCE);
	if (!reference_obj) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	reference_obj->reference.class = ACPI_REFCLASS_REFOF;
	reference_obj->reference.object = referenced_obj;
	*return_desc = reference_obj;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Object %p Type [%s], returning Reference %p\n",
			  obj_desc, acpi_ut_get_object_type_name(obj_desc),
			  *return_desc));

	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_concat_template
*
* PARAMETERS: operand0 - First source object
* operand1 - Second source object
* actual_return_desc - Where to place the return object
* walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: Concatenate two resource templates
*
******************************************************************************/
acpi_status
acpi_ex_concat_template(union acpi_operand_object *operand0,
			union acpi_operand_object *operand1,
			union acpi_operand_object **actual_return_desc,
			struct acpi_walk_state *walk_state)
{
	acpi_status status;
	union acpi_operand_object *return_desc;
	u8 *new_buf;
	u8 *end_tag;
	acpi_size length0;
	acpi_size length1;
	acpi_size new_length;

	/* walk_state is unused here; kept for the common handler signature. */
	ACPI_FUNCTION_TRACE(ex_concat_template);

	/*
	 * Find the end_tag descriptor in each resource template.
	 * Note1: returned pointers point TO the end_tag, not past it.
	 * Note2: zero-length buffers are allowed; treated like one end_tag
	 */

	/* Get the length of the first resource template */

	status = acpi_ut_get_resource_end_tag(operand0, &end_tag);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	length0 = ACPI_PTR_DIFF(end_tag, operand0->buffer.pointer);

	/* Get the length of the second resource template */

	status = acpi_ut_get_resource_end_tag(operand1, &end_tag);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	length1 = ACPI_PTR_DIFF(end_tag, operand1->buffer.pointer);

	/* Combine both lengths, minimum size will be 2 for end_tag */

	new_length = length0 + length1 + sizeof(struct aml_resource_end_tag);

	/* Create a new buffer object for the result (with one end_tag) */

	return_desc = acpi_ut_create_buffer_object(new_length);
	if (!return_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/*
	 * Copy the templates to the new buffer, 0 first, then 1 follows. One
	 * end_tag descriptor is copied from Operand1.
	 */
	new_buf = return_desc->buffer.pointer;
	ACPI_MEMCPY(new_buf, operand0->buffer.pointer, length0);
	ACPI_MEMCPY(new_buf + length0, operand1->buffer.pointer, length1);

	/* Insert end_tag and set the checksum to zero, means "ignore checksum" */

	new_buf[new_length - 1] = 0;
	new_buf[new_length - 2] = ACPI_RESOURCE_NAME_END_TAG | 1;

	/* Return the completed resource template */

	*actual_return_desc = return_desc;
	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_do_concatenate
*
* PARAMETERS: operand0 - First source object
* operand1 - Second source object
* actual_return_desc - Where to place the return object
* walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: Concatenate two objects OF THE SAME TYPE.
*
******************************************************************************/
acpi_status
acpi_ex_do_concatenate(union acpi_operand_object *operand0,
		       union acpi_operand_object *operand1,
		       union acpi_operand_object **actual_return_desc,
		       struct acpi_walk_state *walk_state)
{
	union acpi_operand_object *local_operand1 = operand1;
	union acpi_operand_object *return_desc;
	char *new_buf;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ex_do_concatenate);

	/*
	 * Convert the second operand if necessary. The first operand
	 * determines the type of the second operand, (See the Data Types
	 * section of the ACPI specification.) Both object types are
	 * guaranteed to be either Integer/String/Buffer by the operand
	 * resolution mechanism.
	 */
	switch (operand0->common.type) {
	case ACPI_TYPE_INTEGER:

		status =
		    acpi_ex_convert_to_integer(operand1, &local_operand1, 16);
		break;

	case ACPI_TYPE_STRING:

		status = acpi_ex_convert_to_string(operand1, &local_operand1,
						   ACPI_IMPLICIT_CONVERT_HEX);
		break;

	case ACPI_TYPE_BUFFER:

		status = acpi_ex_convert_to_buffer(operand1, &local_operand1);
		break;

	default:

		ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
			    operand0->common.type));
		status = AE_AML_INTERNAL;
	}

	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	/*
	 * Both operands are now known to be the same object type
	 * (Both are Integer, String, or Buffer), and we can now perform the
	 * concatenation.
	 */

	/*
	 * There are three cases to handle:
	 *
	 * 1) Two Integers concatenated to produce a new Buffer
	 * 2) Two Strings concatenated to produce a new String
	 * 3) Two Buffers concatenated to produce a new Buffer
	 */
	switch (operand0->common.type) {
	case ACPI_TYPE_INTEGER:

		/* Result of two Integers is a Buffer */

		/* Need enough buffer space for two integers */

		return_desc = acpi_ut_create_buffer_object((acpi_size)
							   ACPI_MUL_2
							   (acpi_gbl_integer_byte_width));
		if (!return_desc) {
			status = AE_NO_MEMORY;
			goto cleanup;
		}

		new_buf = (char *)return_desc->buffer.pointer;

		/* Copy the first integer, LSB first */

		ACPI_MEMCPY(new_buf, &operand0->integer.value,
			    acpi_gbl_integer_byte_width);

		/* Copy the second integer (LSB first) after the first */

		ACPI_MEMCPY(new_buf + acpi_gbl_integer_byte_width,
			    &local_operand1->integer.value,
			    acpi_gbl_integer_byte_width);
		break;

	case ACPI_TYPE_STRING:

		/* Result of two Strings is a String */

		return_desc = acpi_ut_create_string_object(((acpi_size)
							    operand0->string.
							    length +
							    local_operand1->
							    string.length));
		if (!return_desc) {
			status = AE_NO_MEMORY;
			goto cleanup;
		}

		new_buf = return_desc->string.pointer;

		/* Concatenate the strings */

		ACPI_STRCPY(new_buf, operand0->string.pointer);
		ACPI_STRCPY(new_buf + operand0->string.length,
			    local_operand1->string.pointer);
		break;

	case ACPI_TYPE_BUFFER:

		/* Result of two Buffers is a Buffer */

		return_desc = acpi_ut_create_buffer_object(((acpi_size)
							    operand0->buffer.
							    length +
							    local_operand1->
							    buffer.length));
		if (!return_desc) {
			status = AE_NO_MEMORY;
			goto cleanup;
		}

		new_buf = (char *)return_desc->buffer.pointer;

		/* Concatenate the buffers */

		ACPI_MEMCPY(new_buf, operand0->buffer.pointer,
			    operand0->buffer.length);
		ACPI_MEMCPY(new_buf + operand0->buffer.length,
			    local_operand1->buffer.pointer,
			    local_operand1->buffer.length);
		break;

	default:

		/* Invalid object type, should not happen here */

		ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
			    operand0->common.type));
		status = AE_AML_INTERNAL;
		goto cleanup;
	}

	*actual_return_desc = return_desc;

cleanup:
	/* If implicit conversion created a new object, drop our reference. */
	if (local_operand1 != operand1) {
		acpi_ut_remove_reference(local_operand1);
	}
	return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_do_math_op
*
* PARAMETERS: opcode - AML opcode
* integer0 - Integer operand #0
* integer1 - Integer operand #1
*
* RETURN: Integer result of the operation
*
* DESCRIPTION: Execute a math AML opcode. The purpose of having all of the
* math functions here is to prevent a lot of pointer dereferencing
* to obtain the operands.
*
******************************************************************************/
/*
 * Execute a binary math AML opcode on two 64-bit integers and return the
 * result.  Unknown opcodes yield zero.
 */
u64 acpi_ex_do_math_op(u16 opcode, u64 integer0, u64 integer1)
{
	ACPI_FUNCTION_ENTRY();

	switch (opcode) {
	case AML_ADD_OP:	/* Add (Integer0, Integer1, Result) */
		return (integer0 + integer1);

	case AML_SUBTRACT_OP:	/* Subtract (Integer0, Integer1, Result) */
		return (integer0 - integer1);

	case AML_MULTIPLY_OP:	/* Multiply (Integer0, Integer1, Result) */
		return (integer0 * integer1);

	case AML_BIT_AND_OP:	/* And (Integer0, Integer1, Result) */
		return (integer0 & integer1);

	case AML_BIT_NAND_OP:	/* NAnd (Integer0, Integer1, Result) */
		return (~(integer0 & integer1));

	case AML_BIT_OR_OP:	/* Or (Integer0, Integer1, Result) */
		return (integer0 | integer1);

	case AML_BIT_NOR_OP:	/* NOr (Integer0, Integer1, Result) */
		return (~(integer0 | integer1));

	case AML_BIT_XOR_OP:	/* XOr (Integer0, Integer1, Result) */
		return (integer0 ^ integer1);

	case AML_SHIFT_LEFT_OP:	/* shift_left (Operand, shift_count, Result) */
	case AML_SHIFT_RIGHT_OP:	/* shift_right (Operand, shift_count, Result) */
		/*
		 * Shifting by >= the integer width is undefined behavior in
		 * C, so the AML-defined result of zero must be produced
		 * explicitly before the shift is attempted.
		 */
		if (integer1 >= acpi_gbl_integer_bit_width) {
			return (0);
		}
		return (opcode == AML_SHIFT_LEFT_OP ?
			(integer0 << integer1) : (integer0 >> integer1));

	default:
		return (0);
	}
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_do_logical_numeric_op
*
* PARAMETERS: opcode - AML opcode
* integer0 - Integer operand #0
* integer1 - Integer operand #1
* logical_result - TRUE/FALSE result of the operation
*
* RETURN: Status
*
* DESCRIPTION: Execute a logical "Numeric" AML opcode. For these Numeric
* operators (LAnd and LOr), both operands must be integers.
*
* Note: cleanest machine code seems to be produced by the code
* below, rather than using statements of the form:
* Result = (Integer0 && Integer1);
*
******************************************************************************/
/*
 * Execute a numeric logical AML opcode (LAnd/LOr) on two integers and
 * return TRUE/FALSE through *logical_result.  Any other opcode produces
 * AE_AML_INTERNAL with a FALSE result.
 */
acpi_status
acpi_ex_do_logical_numeric_op(u16 opcode,
			      u64 integer0, u64 integer1, u8 *logical_result)
{
	u8 result = FALSE;
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ex_do_logical_numeric_op);

	switch (opcode) {
	case AML_LAND_OP:	/* LAnd (Integer0, Integer1) */

		result = (integer0 && integer1) ? TRUE : FALSE;
		break;

	case AML_LOR_OP:	/* LOr (Integer0, Integer1) */

		result = (integer0 || integer1) ? TRUE : FALSE;
		break;

	default:

		status = AE_AML_INTERNAL;
		break;
	}

	/* Return the logical result and status */

	*logical_result = result;
	return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_do_logical_op
*
* PARAMETERS: opcode - AML opcode
* operand0 - operand #0
* operand1 - operand #1
* logical_result - TRUE/FALSE result of the operation
*
* RETURN: Status
*
* DESCRIPTION: Execute a logical AML opcode. The purpose of having all of the
* functions here is to prevent a lot of pointer dereferencing
* to obtain the operands and to simplify the generation of the
* logical value. For the Numeric operators (LAnd and LOr), both
* operands must be integers. For the other logical operators,
* operands can be any combination of Integer/String/Buffer. The
* first operand determines the type to which the second operand
* will be converted.
*
* Note: cleanest machine code seems to be produced by the code
* below, rather than using statements of the form:
* Result = (Operand0 == Operand1);
*
******************************************************************************/
acpi_status
acpi_ex_do_logical_op(u16 opcode,
		      union acpi_operand_object *operand0,
		      union acpi_operand_object *operand1, u8 * logical_result)
{
	union acpi_operand_object *local_operand1 = operand1;
	u64 integer0;
	u64 integer1;
	u32 length0;
	u32 length1;
	acpi_status status = AE_OK;
	u8 local_result = FALSE;
	int compare;

	ACPI_FUNCTION_TRACE(ex_do_logical_op);

	/*
	 * Convert the second operand if necessary. The first operand
	 * determines the type of the second operand, (See the Data Types
	 * section of the ACPI 3.0+ specification.) Both object types are
	 * guaranteed to be either Integer/String/Buffer by the operand
	 * resolution mechanism.
	 */
	switch (operand0->common.type) {
	case ACPI_TYPE_INTEGER:

		status =
		    acpi_ex_convert_to_integer(operand1, &local_operand1, 16);
		break;

	case ACPI_TYPE_STRING:

		status = acpi_ex_convert_to_string(operand1, &local_operand1,
						   ACPI_IMPLICIT_CONVERT_HEX);
		break;

	case ACPI_TYPE_BUFFER:

		status = acpi_ex_convert_to_buffer(operand1, &local_operand1);
		break;

	default:

		status = AE_AML_INTERNAL;
		break;
	}

	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	/*
	 * Two cases: 1) Both Integers, 2) Both Strings or Buffers
	 */
	if (operand0->common.type == ACPI_TYPE_INTEGER) {
		/*
		 * 1) Both operands are of type integer
		 *    Note: local_operand1 may have changed above
		 */
		integer0 = operand0->integer.value;
		integer1 = local_operand1->integer.value;

		switch (opcode) {
		case AML_LEQUAL_OP:	/* LEqual (Operand0, Operand1) */

			if (integer0 == integer1) {
				local_result = TRUE;
			}
			break;

		case AML_LGREATER_OP:	/* LGreater (Operand0, Operand1) */

			if (integer0 > integer1) {
				local_result = TRUE;
			}
			break;

		case AML_LLESS_OP:	/* LLess (Operand0, Operand1) */

			if (integer0 < integer1) {
				local_result = TRUE;
			}
			break;

		default:

			status = AE_AML_INTERNAL;
			break;
		}
	} else {
		/*
		 * 2) Both operands are Strings or both are Buffers
		 *    Note: Code below takes advantage of common Buffer/String
		 *    object fields. local_operand1 may have changed above. Use
		 *    memcmp to handle nulls in buffers.
		 */
		length0 = operand0->buffer.length;
		length1 = local_operand1->buffer.length;

		/* Lexicographic compare: compare the data bytes */

		compare = ACPI_MEMCMP(operand0->buffer.pointer,
				      local_operand1->buffer.pointer,
				      (length0 > length1) ? length1 : length0);

		switch (opcode) {
		case AML_LEQUAL_OP:	/* LEqual (Operand0, Operand1) */

			/* Length and all bytes must be equal */

			if ((length0 == length1) && (compare == 0)) {

				/* Length and all bytes match ==> TRUE */

				local_result = TRUE;
			}
			break;

		case AML_LGREATER_OP:	/* LGreater (Operand0, Operand1) */

			/* Shared prefix decides; goto skips straight to the
			 * common cleanup once the answer is known. */
			if (compare > 0) {
				local_result = TRUE;
				goto cleanup;	/* TRUE */
			}
			if (compare < 0) {
				goto cleanup;	/* FALSE */
			}

			/* Bytes match (to shortest length), compare lengths */

			if (length0 > length1) {
				local_result = TRUE;
			}
			break;

		case AML_LLESS_OP:	/* LLess (Operand0, Operand1) */

			if (compare > 0) {
				goto cleanup;	/* FALSE */
			}
			if (compare < 0) {
				local_result = TRUE;
				goto cleanup;	/* TRUE */
			}

			/* Bytes match (to shortest length), compare lengths */

			if (length0 < length1) {
				local_result = TRUE;
			}
			break;

		default:

			status = AE_AML_INTERNAL;
			break;
		}
	}

cleanup:

	/* New object was created if implicit conversion performed - delete */

	if (local_operand1 != operand1) {
		acpi_ut_remove_reference(local_operand1);
	}

	/* Return the logical result and status */

	*logical_result = local_result;
	return_ACPI_STATUS(status);
}
| gpl-2.0 |
davidmueller13/ZenKernel_Flounder | drivers/acpi/acpica/nsaccess.c | 2138 | 18970 | /*******************************************************************************
*
* Module Name: nsaccess - Top-level functions for accessing ACPI namespace
*
******************************************************************************/
/*
* Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "amlcode.h"
#include "acnamesp.h"
#include "acdispat.h"
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsaccess")
/*******************************************************************************
*
* FUNCTION: acpi_ns_root_initialize
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Allocate and initialize the default root named objects
*
* MUTEX: Locks namespace for entire execution
*
******************************************************************************/
acpi_status acpi_ns_root_initialize(void)
{
	acpi_status status;
	const struct acpi_predefined_names *init_val = NULL;
	struct acpi_namespace_node *new_node;
	union acpi_operand_object *obj_desc;
	acpi_string val = NULL;

	ACPI_FUNCTION_TRACE(ns_root_initialize);

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * The global root ptr is initially NULL, so a non-NULL value indicates
	 * that acpi_ns_root_initialize() has already been called; just return.
	 */
	if (acpi_gbl_root_node) {
		status = AE_OK;
		goto unlock_and_exit;
	}

	/*
	 * Tell the rest of the subsystem that the root is initialized
	 * (This is OK because the namespace is locked)
	 */
	acpi_gbl_root_node = &acpi_gbl_root_node_struct;

	/* Enter the pre-defined names in the name table */

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Entering predefined entries into namespace\n"));

	for (init_val = acpi_gbl_pre_defined_names; init_val->name; init_val++) {

		/* _OSI is optional for now, will be permanent later */

		if (!ACPI_STRCMP(init_val->name, "_OSI")
		    && !acpi_gbl_create_osi_method) {
			continue;
		}

		status = acpi_ns_lookup(NULL, init_val->name, init_val->type,
					ACPI_IMODE_LOAD_PASS2,
					ACPI_NS_NO_UPSEARCH, NULL, &new_node);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Could not create predefined name %s",
					init_val->name));
			continue;
		}

		/*
		 * Name entered successfully. If entry in pre_defined_names[] specifies
		 * an initial value, create the initial value.
		 */
		if (init_val->val) {
			/* NOTE(review): if the host override returns a string,
			 * its ownership/lifetime is assumed to outlive this
			 * node (it is stored by pointer below) — confirm
			 * against acpi_os_predefined_override(). */
			status = acpi_os_predefined_override(init_val, &val);
			if (ACPI_FAILURE(status)) {
				ACPI_ERROR((AE_INFO,
					    "Could not override predefined %s",
					    init_val->name));
			}

			if (!val) {
				val = init_val->val;
			}

			/*
			 * Entry requests an initial value, allocate a
			 * descriptor for it.
			 */
			obj_desc =
			    acpi_ut_create_internal_object(init_val->type);
			if (!obj_desc) {
				status = AE_NO_MEMORY;
				goto unlock_and_exit;
			}

			/*
			 * Convert value string from table entry to
			 * internal representation. Only types actually
			 * used for initial values are implemented here.
			 */
			switch (init_val->type) {
			case ACPI_TYPE_METHOD:

				obj_desc->method.param_count =
				    (u8) ACPI_TO_INTEGER(val);
				obj_desc->common.flags |= AOPOBJ_DATA_VALID;

#if defined (ACPI_ASL_COMPILER)

				/* Save the parameter count for the iASL compiler */

				new_node->value = obj_desc->method.param_count;
#else
				/* Mark this as a very SPECIAL method */

				obj_desc->method.info_flags =
				    ACPI_METHOD_INTERNAL_ONLY;
				obj_desc->method.dispatch.implementation =
				    acpi_ut_osi_implementation;
#endif
				break;

			case ACPI_TYPE_INTEGER:

				obj_desc->integer.value = ACPI_TO_INTEGER(val);
				break;

			case ACPI_TYPE_STRING:

				/* Build an object around the static string */

				obj_desc->string.length = (u32)ACPI_STRLEN(val);
				obj_desc->string.pointer = val;
				obj_desc->common.flags |= AOPOBJ_STATIC_POINTER;
				break;

			case ACPI_TYPE_MUTEX:

				obj_desc->mutex.node = new_node;
				obj_desc->mutex.sync_level =
				    (u8) (ACPI_TO_INTEGER(val) - 1);

				/* Create a mutex */

				status =
				    acpi_os_create_mutex(&obj_desc->mutex.
							 os_mutex);
				if (ACPI_FAILURE(status)) {
					acpi_ut_remove_reference(obj_desc);
					goto unlock_and_exit;
				}

				/* Special case for ACPI Global Lock */

				if (ACPI_STRCMP(init_val->name, "_GL_") == 0) {
					acpi_gbl_global_lock_mutex = obj_desc;

					/* Create additional counting semaphore for global lock */

					status =
					    acpi_os_create_semaphore(1, 0,
								     &acpi_gbl_global_lock_semaphore);
					if (ACPI_FAILURE(status)) {
						acpi_ut_remove_reference
						    (obj_desc);
						goto unlock_and_exit;
					}
				}
				break;

			default:

				ACPI_ERROR((AE_INFO,
					    "Unsupported initial type value 0x%X",
					    init_val->type));
				acpi_ut_remove_reference(obj_desc);
				obj_desc = NULL;
				continue;
			}

			/* Store pointer to value descriptor in the Node */

			status = acpi_ns_attach_object(new_node, obj_desc,
						       obj_desc->common.type);

			/* Remove local reference to the object */

			acpi_ut_remove_reference(obj_desc);
		}
	}

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);

	/* Save a handle to "_GPE", it is always present */

	if (ACPI_SUCCESS(status)) {
		status = acpi_ns_get_node(NULL, "\\_GPE", ACPI_NS_NO_UPSEARCH,
					  &acpi_gbl_fadt_gpe_device);
	}

	return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_lookup
*
* PARAMETERS: scope_info - Current scope info block
* pathname - Search pathname, in internal format
* (as represented in the AML stream)
* type - Type associated with name
* interpreter_mode - IMODE_LOAD_PASS2 => add name if not found
* flags - Flags describing the search restrictions
* walk_state - Current state of the walk
* return_node - Where the Node is placed (if found
* or created successfully)
*
* RETURN: Status
*
* DESCRIPTION: Find or enter the passed name in the name space.
* Log an error if name not found in Exec mode.
*
* MUTEX: Assumes namespace is locked.
*
******************************************************************************/
acpi_status
acpi_ns_lookup(union acpi_generic_state *scope_info,
	       char *pathname,
	       acpi_object_type type,
	       acpi_interpreter_mode interpreter_mode,
	       u32 flags,
	       struct acpi_walk_state *walk_state,
	       struct acpi_namespace_node **return_node)
{
	acpi_status status;
	char *path = pathname;
	struct acpi_namespace_node *prefix_node;
	struct acpi_namespace_node *current_node = NULL;
	struct acpi_namespace_node *this_node = NULL;
	u32 num_segments;
	u32 num_carats;
	acpi_name simple_name;
	acpi_object_type type_to_check_for;
	acpi_object_type this_search_type;
	u32 search_parent_flag = ACPI_NS_SEARCH_PARENT;
	u32 local_flags;

	ACPI_FUNCTION_TRACE(ns_lookup);

	if (!return_node) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/*
	 * SEARCH_PARENT and ERROR_IF_FOUND are decided per-segment below,
	 * so strip them from the flags passed to the segment search.
	 */
	local_flags = flags & ~(ACPI_NS_ERROR_IF_FOUND | ACPI_NS_SEARCH_PARENT);
	*return_node = ACPI_ENTRY_NOT_FOUND;
	acpi_gbl_ns_lookup_count++;

	if (!acpi_gbl_root_node) {
		return_ACPI_STATUS(AE_NO_NAMESPACE);
	}

	/* Get the prefix scope. A null scope means use the root scope */

	if ((!scope_info) || (!scope_info->scope.node)) {
		ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
				  "Null scope prefix, using root node (%p)\n",
				  acpi_gbl_root_node));

		prefix_node = acpi_gbl_root_node;
	} else {
		prefix_node = scope_info->scope.node;
		if (ACPI_GET_DESCRIPTOR_TYPE(prefix_node) !=
		    ACPI_DESC_TYPE_NAMED) {
			ACPI_ERROR((AE_INFO, "%p is not a namespace node [%s]",
				    prefix_node,
				    acpi_ut_get_descriptor_name(prefix_node)));
			return_ACPI_STATUS(AE_AML_INTERNAL);
		}

		if (!(flags & ACPI_NS_PREFIX_IS_SCOPE)) {
			/*
			 * This node might not be a actual "scope" node (such as a
			 * Device/Method, etc.) It could be a Package or other object
			 * node. Backup up the tree to find the containing scope node.
			 */
			while (!acpi_ns_opens_scope(prefix_node->type) &&
			       prefix_node->type != ACPI_TYPE_ANY) {
				prefix_node = prefix_node->parent;
			}
		}
	}

	/* Save type. TBD: may be no longer necessary */

	type_to_check_for = type;

	/*
	 * Begin examination of the actual pathname
	 */
	if (!pathname) {

		/* A Null name_path is allowed and refers to the root */

		num_segments = 0;
		this_node = acpi_gbl_root_node;
		path = "";

		ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
				  "Null Pathname (Zero segments), Flags=%X\n",
				  flags));
	} else {
		/*
		 * Name pointer is valid (and must be in internal name format)
		 *
		 * Check for scope prefixes:
		 *
		 * As represented in the AML stream, a namepath consists of an
		 * optional scope prefix followed by a name segment part.
		 *
		 * If present, the scope prefix is either a Root Prefix (in
		 * which case the name is fully qualified), or one or more
		 * Parent Prefixes (in which case the name's scope is relative
		 * to the current scope).
		 */
		if (*path == (u8) AML_ROOT_PREFIX) {

			/* Pathname is fully qualified, start from the root */

			this_node = acpi_gbl_root_node;
			search_parent_flag = ACPI_NS_NO_UPSEARCH;

			/* Point to name segment part */

			path++;

			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
					  "Path is absolute from root [%p]\n",
					  this_node));
		} else {
			/* Pathname is relative to current scope, start there */

			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
					  "Searching relative to prefix scope [%4.4s] (%p)\n",
					  acpi_ut_get_node_name(prefix_node),
					  prefix_node));

			/*
			 * Handle multiple Parent Prefixes (carat) by just getting
			 * the parent node for each prefix instance.
			 */
			this_node = prefix_node;
			num_carats = 0;
			while (*path == (u8) AML_PARENT_PREFIX) {

				/* Name is fully qualified, no search rules apply */

				search_parent_flag = ACPI_NS_NO_UPSEARCH;

				/*
				 * Point past this prefix to the name segment
				 * part or the next Parent Prefix
				 */
				path++;

				/* Backup to the parent node */

				num_carats++;
				this_node = this_node->parent;
				if (!this_node) {

					/* Current scope has no parent scope */

					ACPI_ERROR((AE_INFO,
						    "ACPI path has too many parent prefixes (^) "
						    "- reached beyond root node"));
					return_ACPI_STATUS(AE_NOT_FOUND);
				}
			}

			if (search_parent_flag == ACPI_NS_NO_UPSEARCH) {
				ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
						  "Search scope is [%4.4s], path has %u carat(s)\n",
						  acpi_ut_get_node_name
						  (this_node), num_carats));
			}
		}

		/*
		 * Determine the number of ACPI name segments in this pathname.
		 *
		 * The segment part consists of either:
		 *  - A Null name segment (0)
		 *  - A dual_name_prefix followed by two 4-byte name segments
		 *  - A multi_name_prefix followed by a byte indicating the
		 *    number of segments and the segments themselves.
		 *  - A single 4-byte name segment
		 *
		 * Examine the name prefix opcode, if any, to determine the number of
		 * segments.
		 */
		switch (*path) {
		case 0:
			/*
			 * Null name after a root or parent prefixes. We already
			 * have the correct target node and there are no name segments.
			 */
			num_segments = 0;
			type = this_node->type;

			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
					  "Prefix-only Pathname (Zero name segments), Flags=%X\n",
					  flags));
			break;

		case AML_DUAL_NAME_PREFIX:

			/* More than one name_seg, search rules do not apply */

			search_parent_flag = ACPI_NS_NO_UPSEARCH;

			/* Two segments, point to first name segment */

			num_segments = 2;
			path++;

			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
					  "Dual Pathname (2 segments, Flags=%X)\n",
					  flags));
			break;

		case AML_MULTI_NAME_PREFIX_OP:

			/* More than one name_seg, search rules do not apply */

			search_parent_flag = ACPI_NS_NO_UPSEARCH;

			/* Extract segment count, point to first name segment */

			path++;
			num_segments = (u32) (u8) * path;
			path++;

			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
					  "Multi Pathname (%u Segments, Flags=%X)\n",
					  num_segments, flags));
			break;

		default:
			/*
			 * Not a Null name, no Dual or Multi prefix, hence there is
			 * only one name segment and Pathname is already pointing to it.
			 */
			num_segments = 1;

			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
					  "Simple Pathname (1 segment, Flags=%X)\n",
					  flags));
			break;
		}

		ACPI_DEBUG_EXEC(acpi_ns_print_pathname(num_segments, path));
	}

	/*
	 * Search namespace for each segment of the name. Loop through and
	 * verify (or add to the namespace) each name segment.
	 *
	 * The object type is significant only at the last name
	 * segment. (We don't care about the types along the path, only
	 * the type of the final target object.)
	 */
	this_search_type = ACPI_TYPE_ANY;
	current_node = this_node;
	while (num_segments && current_node) {
		num_segments--;
		if (!num_segments) {

			/* This is the last segment, enable typechecking */

			this_search_type = type;

			/*
			 * Only allow automatic parent search (search rules) if the caller
			 * requested it AND we have a single, non-fully-qualified name_seg
			 */
			if ((search_parent_flag != ACPI_NS_NO_UPSEARCH) &&
			    (flags & ACPI_NS_SEARCH_PARENT)) {
				local_flags |= ACPI_NS_SEARCH_PARENT;
			}

			/* Set error flag according to caller */

			if (flags & ACPI_NS_ERROR_IF_FOUND) {
				local_flags |= ACPI_NS_ERROR_IF_FOUND;
			}
		}

		/* Extract one ACPI name from the front of the pathname */

		ACPI_MOVE_32_TO_32(&simple_name, path);

		/* Try to find the single (4 character) ACPI name */

		status =
		    acpi_ns_search_and_enter(simple_name, walk_state,
					     current_node, interpreter_mode,
					     this_search_type, local_flags,
					     &this_node);
		if (ACPI_FAILURE(status)) {
			if (status == AE_NOT_FOUND) {

				/* Name not found in ACPI namespace */

				ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
						  "Name [%4.4s] not found in scope [%4.4s] %p\n",
						  (char *)&simple_name,
						  (char *)&current_node->name,
						  current_node));
			}

			*return_node = this_node;
			return_ACPI_STATUS(status);
		}

		/* More segments to follow? */

		if (num_segments > 0) {
			/*
			 * If we have an alias to an object that opens a scope (such as a
			 * device or processor), we need to dereference the alias here so
			 * that we can access any children of the original node (via the
			 * remaining segments).
			 */
			if (this_node->type == ACPI_TYPE_LOCAL_ALIAS) {
				if (!this_node->object) {
					return_ACPI_STATUS(AE_NOT_EXIST);
				}

				if (acpi_ns_opens_scope
				    (((struct acpi_namespace_node *)
				      this_node->object)->type)) {
					this_node =
					    (struct acpi_namespace_node *)
					    this_node->object;
				}
			}
		}

		/* Special handling for the last segment (num_segments == 0) */

		else {
			/*
			 * Sanity typecheck of the target object:
			 *
			 * If 1) This is the last segment (num_segments == 0)
			 *    2) And we are looking for a specific type
			 *       (Not checking for TYPE_ANY)
			 *    3) Which is not an alias
			 *    4) Which is not a local type (TYPE_SCOPE)
			 *    5) And the type of target object is known (not TYPE_ANY)
			 *    6) And target object does not match what we are looking for
			 *
			 * Then we have a type mismatch. Just warn and ignore it.
			 */
			if ((type_to_check_for != ACPI_TYPE_ANY) &&
			    (type_to_check_for != ACPI_TYPE_LOCAL_ALIAS) &&
			    (type_to_check_for != ACPI_TYPE_LOCAL_METHOD_ALIAS)
			    && (type_to_check_for != ACPI_TYPE_LOCAL_SCOPE)
			    && (this_node->type != ACPI_TYPE_ANY)
			    && (this_node->type != type_to_check_for)) {

				/* Complain about a type mismatch */

				ACPI_WARNING((AE_INFO,
					      "NsLookup: Type mismatch on %4.4s (%s), searching for (%s)",
					      ACPI_CAST_PTR(char, &simple_name),
					      acpi_ut_get_type_name(this_node->
								    type),
					      acpi_ut_get_type_name
					      (type_to_check_for)));
			}

			/*
			 * If this is the last name segment and we are not looking for a
			 * specific type, but the type of found object is known, use that
			 * type to (later) see if it opens a scope.
			 */
			if (type == ACPI_TYPE_ANY) {
				type = this_node->type;
			}
		}

		/* Point to next name segment and make this node current */

		path += ACPI_NAME_SIZE;
		current_node = this_node;
	}

	/* Always check if we need to open a new scope */

	if (!(flags & ACPI_NS_DONT_OPEN_SCOPE) && (walk_state)) {
		/*
		 * If entry is a type which opens a scope, push the new scope on the
		 * scope stack.
		 */
		if (acpi_ns_opens_scope(type)) {
			status =
			    acpi_ds_scope_stack_push(this_node, type,
						     walk_state);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}
	}

	*return_node = this_node;
	return_ACPI_STATUS(AE_OK);
}
| gpl-2.0 |
Troj80/T.J.T-Kernel-vivo | fs/xfs/xfs_btree.c | 2650 | 96426 | /*
* Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
#include "xfs_error.h"
#include "xfs_trace.h"
/*
* Cursor allocation zone.
*/
kmem_zone_t *xfs_btree_cur_zone;
/*
 * Btree magic numbers.
 *
 * On-disk magic value expected in bb_magic for each btree type,
 * indexed by xfs_btnum_t (bnobt, cntbt, bmapbt, inobt).
 */
const __uint32_t xfs_magics[XFS_BTNUM_MAX] = {
	XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC
};
/*
 * Validate the header of a long-form (64-bit pointer) btree block:
 * magic number matches the cursor's btree type, level and record count
 * are plausible, and each sibling pointer is either the NULLDFSBNO
 * sentinel or passes the filesystem-block sanity check.
 */
STATIC int				/* error (0 or EFSCORRUPTED) */
xfs_btree_check_lblock(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* btree long form block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp)	/* buffer for block, if any */
{
	int			lblock_ok; /* block passes checks */
	struct xfs_mount	*mp;	/* file system mount point */

	mp = cur->bc_mp;
	lblock_ok =
		be32_to_cpu(block->bb_magic) == xfs_magics[cur->bc_btnum] &&
		be16_to_cpu(block->bb_level) == level &&
		be16_to_cpu(block->bb_numrecs) <=
			cur->bc_ops->get_maxrecs(cur, level) &&
		/* a raw zero sibling pointer is never valid on disk */
		block->bb_u.l.bb_leftsib &&
		(be64_to_cpu(block->bb_u.l.bb_leftsib) == NULLDFSBNO ||
		 XFS_FSB_SANITY_CHECK(mp,
			be64_to_cpu(block->bb_u.l.bb_leftsib))) &&
		block->bb_u.l.bb_rightsib &&
		(be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO ||
		 XFS_FSB_SANITY_CHECK(mp,
			be64_to_cpu(block->bb_u.l.bb_rightsib)));
	/* XFS_TEST_ERROR may also inject a failure for error testing */
	if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp,
			XFS_ERRTAG_BTREE_CHECK_LBLOCK,
			XFS_RANDOM_BTREE_CHECK_LBLOCK))) {
		if (bp)
			trace_xfs_btree_corrupt(bp, _RET_IP_);
		XFS_ERROR_REPORT("xfs_btree_check_lblock", XFS_ERRLEVEL_LOW,
				 mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}
/*
 * Validate the header of a short-form (32-bit pointer, per-AG) btree
 * block: magic number matches the cursor's btree type, level and record
 * count are plausible, and each sibling pointer is either NULLAGBLOCK
 * or lies within the owning allocation group.
 */
STATIC int				/* error (0 or EFSCORRUPTED) */
xfs_btree_check_sblock(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* btree short form block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp)	/* buffer containing block */
{
	struct xfs_buf		*agbp;	/* buffer for ag. freespace struct */
	struct xfs_agf		*agf;	/* ag. freespace structure */
	xfs_agblock_t		agflen;	/* native ag. freespace length */
	int			sblock_ok; /* block passes checks */

	agbp = cur->bc_private.a.agbp;
	agf = XFS_BUF_TO_AGF(agbp);
	agflen = be32_to_cpu(agf->agf_length);
	sblock_ok =
		be32_to_cpu(block->bb_magic) == xfs_magics[cur->bc_btnum] &&
		be16_to_cpu(block->bb_level) == level &&
		be16_to_cpu(block->bb_numrecs) <=
			cur->bc_ops->get_maxrecs(cur, level) &&
		(be32_to_cpu(block->bb_u.s.bb_leftsib) == NULLAGBLOCK ||
		 be32_to_cpu(block->bb_u.s.bb_leftsib) < agflen) &&
		/* a raw zero sibling pointer is never valid on disk */
		block->bb_u.s.bb_leftsib &&
		(be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK ||
		 be32_to_cpu(block->bb_u.s.bb_rightsib) < agflen) &&
		block->bb_u.s.bb_rightsib;
	/* XFS_TEST_ERROR may also inject a failure for error testing */
	if (unlikely(XFS_TEST_ERROR(!sblock_ok, cur->bc_mp,
			XFS_ERRTAG_BTREE_CHECK_SBLOCK,
			XFS_RANDOM_BTREE_CHECK_SBLOCK))) {
		if (bp)
			trace_xfs_btree_corrupt(bp, _RET_IP_);
		XFS_CORRUPTION_ERROR("xfs_btree_check_sblock",
			XFS_ERRLEVEL_LOW, cur->bc_mp, block);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}
/*
* Debug routine: check that block header is ok.
*/
/*
 * Validate a generic btree block header by dispatching to the
 * long-form or short-form checker based on the cursor's pointer size.
 */
int
xfs_btree_check_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* generic btree block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp)	/* buffer containing block, if any */
{
	return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
		xfs_btree_check_lblock(cur, block, level, bp) :
		xfs_btree_check_sblock(cur, block, level, bp);
}
/*
* Check that (long) pointer is ok.
*/
int					/* error (0 or EFSCORRUPTED) */
xfs_btree_check_lptr(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_dfsbno_t		bno,	/* btree block disk address */
	int			level)	/* btree block level */
{
	/*
	 * XFS_WANT_CORRUPTED_RETURN returns EFSCORRUPTED from this
	 * function if the condition is false.
	 */
	XFS_WANT_CORRUPTED_RETURN(
		level > 0 &&
		bno != NULLDFSBNO &&
		XFS_FSB_SANITY_CHECK(cur->bc_mp, bno));
	return 0;
}
#ifdef DEBUG
/*
* Check that (short) pointer is ok.
*/
STATIC int				/* error (0 or EFSCORRUPTED) */
xfs_btree_check_sptr(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* btree block disk address */
	int			level)	/* btree block level */
{
	xfs_agblock_t		agblocks = cur->bc_mp->m_sb.sb_agblocks;

	/*
	 * Valid short-form pointers are non-null, non-zero (block 0 of
	 * an AG holds the superblock) and within the AG's block count.
	 * XFS_WANT_CORRUPTED_RETURN returns EFSCORRUPTED on failure.
	 */
	XFS_WANT_CORRUPTED_RETURN(
		level > 0 &&
		bno != NULLAGBLOCK &&
		bno != 0 &&
		bno < agblocks);
	return 0;
}
/*
* Check that block ptr is ok.
*/
/*
 * Validate the index'th block pointer starting at ptr, dispatching
 * by the cursor's pointer size.
 */
STATIC int				/* error (0 or EFSCORRUPTED) */
xfs_btree_check_ptr(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	union xfs_btree_ptr	*ptr,	/* btree block disk address */
	int			index,	/* offset from ptr to check */
	int			level)	/* btree block level */
{
	if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS))
		return xfs_btree_check_sptr(cur,
				be32_to_cpu((&ptr->s)[index]), level);

	return xfs_btree_check_lptr(cur,
			be64_to_cpu((&ptr->l)[index]), level);
}
#endif
/*
* Delete the btree cursor.
*/
void
xfs_btree_del_cursor(
	xfs_btree_cur_t	*cur,		/* btree cursor */
	int		error)		/* del because of error */
{
	int		i;		/* btree level */

	/*
	 * Clear the buffer pointers, and release the buffers.
	 * If we're doing this in the face of an error, we
	 * need to make sure to inspect all of the entries
	 * in the bc_bufs array for buffers to be unlocked.
	 * This is because some of the btree code works from
	 * level n down to 0, and if we get an error along
	 * the way we won't have initialized all the entries
	 * down to 0.
	 */
	for (i = 0; i < cur->bc_nlevels; i++) {
		if (cur->bc_bufs[i])
			xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
		else if (!error)
			/* first NULL slot ends the used range on success */
			break;
	}
	/*
	 * Can't free a bmap cursor without having dealt with the
	 * allocated indirect blocks' accounting.
	 */
	ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP ||
	       cur->bc_private.b.allocated == 0);
	/*
	 * Free the cursor.
	 */
	kmem_zone_free(xfs_btree_cur_zone, cur);
}
/*
* Duplicate the btree cursor.
* Allocate a new one, copy the record, re-get the buffers.
*/
int					/* error */
xfs_btree_dup_cursor(
	xfs_btree_cur_t	*cur,		/* input cursor */
	xfs_btree_cur_t	**ncur)		/* output cursor */
{
	xfs_buf_t	*bp;		/* btree block's buffer pointer */
	int		error;		/* error return value */
	int		i;		/* level number of btree block */
	xfs_mount_t	*mp;		/* mount structure for filesystem */
	xfs_btree_cur_t	*new;		/* new cursor value */
	xfs_trans_t	*tp;		/* transaction pointer, can be NULL */

	tp = cur->bc_tp;
	mp = cur->bc_mp;

	/*
	 * Allocate a new cursor like the old one.
	 */
	new = cur->bc_ops->dup_cursor(cur);

	/*
	 * Copy the record currently in the cursor.
	 */
	new->bc_rec = cur->bc_rec;

	/*
	 * For each level current, re-get the buffer and copy the ptr value.
	 */
	for (i = 0; i < new->bc_nlevels; i++) {
		new->bc_ptrs[i] = cur->bc_ptrs[i];
		new->bc_ra[i] = cur->bc_ra[i];
		if ((bp = cur->bc_bufs[i])) {
			/* re-read so the new cursor holds its own buffer */
			if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
				XFS_BUF_ADDR(bp), mp->m_bsize, 0, &bp))) {
				/* tear down the partial duplicate on error */
				xfs_btree_del_cursor(new, error);
				*ncur = NULL;
				return error;
			}
			new->bc_bufs[i] = bp;
			ASSERT(bp);
			ASSERT(!XFS_BUF_GETERROR(bp));
		} else
			new->bc_bufs[i] = NULL;
	}
	*ncur = new;
	return 0;
}
/*
* XFS btree block layout and addressing:
*
* There are two types of blocks in the btree: leaf and non-leaf blocks.
*
 * Leaf blocks start with a header, followed by records containing
* the values. A non-leaf block also starts with the same header, and
* then first contains lookup keys followed by an equal number of pointers
* to the btree blocks at the previous level.
*
* +--------+-------+-------+-------+-------+-------+-------+
* Leaf: | header | rec 1 | rec 2 | rec 3 | rec 4 | rec 5 | rec N |
* +--------+-------+-------+-------+-------+-------+-------+
*
* +--------+-------+-------+-------+-------+-------+-------+
* Non-Leaf: | header | key 1 | key 2 | key N | ptr 1 | ptr 2 | ptr N |
* +--------+-------+-------+-------+-------+-------+-------+
*
* The header is called struct xfs_btree_block for reasons better left unknown
* and comes in different versions for short (32bit) and long (64bit) block
* pointers. The record and key structures are defined by the btree instances
* and opaque to the btree core. The block pointers are simple disk endian
* integers, available in a short (32bit) and long (64bit) variant.
*
* The helpers below calculate the offset of a given record, key or pointer
* into a btree block (xfs_btree_*_offset) or return a pointer to the given
* record, key or pointer (xfs_btree_*_addr). Note that all addressing
* inside the btree block is done using indices starting at one, not zero!
*/
/*
* Return size of the btree block header for this btree instance.
*/
/*
 * Size of the common block header for this btree instance: long-pointer
 * trees carry 64-bit sibling links, so their header is larger.
 */
static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return XFS_BTREE_LBLOCK_LEN;
	return XFS_BTREE_SBLOCK_LEN;
}
/*
* Return size of btree block pointers for this btree instance.
*/
/*
 * On-disk size of one block pointer for this btree instance.
 */
static inline size_t xfs_btree_ptr_len(struct xfs_btree_cur *cur)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return sizeof(__be64);
	return sizeof(__be32);
}
/*
* Calculate offset of the n-th record in a btree block.
*/
/*
 * Byte offset of the n-th record in a btree block.
 * Record indices are 1-based.
 */
STATIC size_t
xfs_btree_rec_offset(
	struct xfs_btree_cur	*cur,
	int			n)
{
	size_t			off = xfs_btree_block_len(cur);

	return off + (n - 1) * cur->bc_ops->rec_len;
}
/*
* Calculate offset of the n-th key in a btree block.
*/
/*
 * Byte offset of the n-th key in a btree block.
 * Key indices are 1-based.
 */
STATIC size_t
xfs_btree_key_offset(
	struct xfs_btree_cur	*cur,
	int			n)
{
	size_t			off = xfs_btree_block_len(cur);

	return off + (n - 1) * cur->bc_ops->key_len;
}
/*
* Calculate offset of the n-th block pointer in a btree block.
*/
/*
 * Byte offset of the n-th block pointer in a btree block: past the
 * header and the full key array for this level. Indices are 1-based.
 */
STATIC size_t
xfs_btree_ptr_offset(
	struct xfs_btree_cur	*cur,
	int			n,
	int			level)
{
	size_t			off = xfs_btree_block_len(cur);

	off += cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len;
	return off + (n - 1) * xfs_btree_ptr_len(cur);
}
/*
* Return a pointer to the n-th record in the btree block.
*/
/*
 * Address of the n-th record in the btree block (1-based).
 */
STATIC union xfs_btree_rec *
xfs_btree_rec_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	char			*base = (char *)block;

	return (union xfs_btree_rec *)(base + xfs_btree_rec_offset(cur, n));
}
/*
* Return a pointer to the n-th key in the btree block.
*/
/*
 * Address of the n-th key in the btree block (1-based).
 */
STATIC union xfs_btree_key *
xfs_btree_key_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	char			*base = (char *)block;

	return (union xfs_btree_key *)(base + xfs_btree_key_offset(cur, n));
}
/*
* Return a pointer to the n-th block pointer in the btree block.
*/
/*
 * Address of the n-th block pointer in the btree block (1-based).
 * Only valid on interior blocks: leaves (level 0) have no pointer area.
 */
STATIC union xfs_btree_ptr *
xfs_btree_ptr_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	char			*base = (char *)block;
	int			level = xfs_btree_get_level(block);

	ASSERT(block->bb_level != 0);

	return (union xfs_btree_ptr *)
		(base + xfs_btree_ptr_offset(cur, n, level));
}
/*
 * Get the root block, which is stored in the inode.
*
* For now this btree implementation assumes the btree root is always
* stored in the if_broot field of an inode fork.
*/
/*
 * Return the root block of an inode-rooted btree, taken from the
 * if_broot field of the fork the cursor points at.
 */
STATIC struct xfs_btree_block *
xfs_btree_get_iroot(
	struct xfs_btree_cur	*cur)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
						     cur->bc_private.b.whichfork);

	return (struct xfs_btree_block *)ifp->if_broot;
}
/*
* Retrieve the block pointer from the cursor at the given level.
* This may be an inode btree root or from a buffer.
*/
/*
 * Fetch the block at the given cursor level. For an inode-rooted tree
 * the topmost level lives in the inode (no buffer, *bpp is NULL);
 * everything else comes from the cursor's buffer array.
 */
STATIC struct xfs_btree_block *	/* generic btree block pointer */
xfs_btree_get_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level in btree */
	struct xfs_buf		**bpp)	/* buffer containing the block */
{
	int			root_in_inode;

	root_in_inode = (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
			(level == cur->bc_nlevels - 1);
	if (root_in_inode) {
		*bpp = NULL;
		return xfs_btree_get_iroot(cur);
	}

	*bpp = cur->bc_bufs[level];
	return XFS_BUF_TO_BLOCK(*bpp);
}
/*
* Get a buffer for the block, return it with no data read.
* Long-form addressing.
*/
/*
 * Acquire a buffer for the given filesystem block without reading it.
 * Long-form (filesystem-wide block number) addressing.
 */
xfs_buf_t *				/* buffer for fsbno */
xfs_btree_get_bufl(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_fsblock_t	fsbno,		/* file system block number */
	uint		lock)		/* lock flags for get_buf */
{
	xfs_daddr_t	daddr;		/* real disk block address */
	xfs_buf_t	*buf;		/* buffer pointer (return value) */

	ASSERT(fsbno != NULLFSBLOCK);
	daddr = XFS_FSB_TO_DADDR(mp, fsbno);
	buf = xfs_trans_get_buf(tp, mp->m_ddev_targp, daddr, mp->m_bsize,
				lock);
	ASSERT(buf);
	ASSERT(!XFS_BUF_GETERROR(buf));
	return buf;
}
/*
* Get a buffer for the block, return it with no data read.
* Short-form addressing.
*/
/*
 * Acquire a buffer for the given AG block without reading it.
 * Short-form (AG number + AG block) addressing.
 */
xfs_buf_t *				/* buffer for agno/agbno */
xfs_btree_get_bufs(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_agblock_t	agbno,		/* allocation group block number */
	uint		lock)		/* lock flags for get_buf */
{
	xfs_daddr_t	daddr;		/* real disk block address */
	xfs_buf_t	*buf;		/* buffer pointer (return value) */

	ASSERT(agno != NULLAGNUMBER);
	ASSERT(agbno != NULLAGBLOCK);
	daddr = XFS_AGB_TO_DADDR(mp, agno, agbno);
	buf = xfs_trans_get_buf(tp, mp->m_ddev_targp, daddr, mp->m_bsize,
				lock);
	ASSERT(buf);
	ASSERT(!XFS_BUF_GETERROR(buf));
	return buf;
}
/*
* Check for the cursor referring to the last block at the given level.
*/
/*
 * Does the cursor point at the last block of the given level?
 * True when the block's right sibling is the null sentinel.
 */
int					/* 1=is last block, 0=not last block */
xfs_btree_islastblock(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level)	/* level to check */
{
	struct xfs_btree_block	*block;	/* generic btree block pointer */
	xfs_buf_t		*buf;	/* buffer containing block */

	block = xfs_btree_get_block(cur, level, &buf);
	xfs_btree_check_block(cur, block, level, buf);

	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO;
	return be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK;
}
/*
* Change the cursor to point to the first record at the given level.
* Other levels are unaffected.
*/
/*
 * Point the cursor at the first record of the given level.
 * Other levels are untouched. Fails (returns 0) on an empty block.
 */
STATIC int				/* success=1, failure=0 */
xfs_btree_firstrec(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level)	/* level to change */
{
	struct xfs_btree_block	*block;	/* generic btree block pointer */
	xfs_buf_t		*buf;	/* buffer containing block */

	block = xfs_btree_get_block(cur, level, &buf);
	xfs_btree_check_block(cur, block, level, buf);

	/* no records, nothing to point at */
	if (!block->bb_numrecs)
		return 0;

	/* record/key indices are 1-based, so 1 is the first entry */
	cur->bc_ptrs[level] = 1;
	return 1;
}
/*
* Change the cursor to point to the last record in the current block
* at the given level. Other levels are unaffected.
*/
/*
 * Point the cursor at the last record of the current block at the
 * given level. Other levels are untouched. Fails (returns 0) on an
 * empty block.
 */
STATIC int				/* success=1, failure=0 */
xfs_btree_lastrec(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level)	/* level to change */
{
	struct xfs_btree_block	*block;	/* generic btree block pointer */
	xfs_buf_t		*buf;	/* buffer containing block */

	block = xfs_btree_get_block(cur, level, &buf);
	xfs_btree_check_block(cur, block, level, buf);

	/* no records, nothing to point at */
	if (!block->bb_numrecs)
		return 0;

	/* 1-based indices: numrecs is the last entry */
	cur->bc_ptrs[level] = be16_to_cpu(block->bb_numrecs);
	return 1;
}
/*
* Compute first and last byte offsets for the fields given.
* Interprets the offsets table, which contains struct field offsets.
*/
void
xfs_btree_offsets(
	__int64_t	fields,		/* bitmask of fields */
	const short	*offsets,	/* table of field offsets */
	int		nbits,		/* number of bits to inspect */
	int		*first,		/* output: first byte offset */
	int		*last)		/* output: last byte offset */
{
	int		i;		/* current bit number */
	__int64_t	imask;		/* mask for current bit number */

	ASSERT(fields != 0);
	/*
	 * Find the lowest bit, so the first byte offset.
	 */
	for (i = 0, imask = 1LL; ; i++, imask <<= 1) {
		if (imask & fields) {
			*first = offsets[i];
			break;
		}
	}
	/*
	 * Find the highest bit, so the last byte offset.
	 * NOTE: offsets[i + 1] is read here, so the table is expected to
	 * hold one entry past the last field (the end of the structure);
	 * the byte before it is the last byte of field i.
	 */
	for (i = nbits - 1, imask = 1LL << i; ; i--, imask >>= 1) {
		if (imask & fields) {
			*last = offsets[i + 1] - 1;
			break;
		}
	}
}
/*
* Get a buffer for the block, return it read in.
* Long-form addressing.
*/
int					/* error */
xfs_btree_read_bufl(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_fsblock_t	fsbno,		/* file system block number */
	uint		lock,		/* lock flags for read_buf */
	xfs_buf_t	**bpp,		/* buffer for fsbno */
	int		refval)		/* ref count value for buffer */
{
	xfs_buf_t	*bp;		/* return value */
	xfs_daddr_t	d;		/* real disk block address */
	int		error;

	ASSERT(fsbno != NULLFSBLOCK);
	d = XFS_FSB_TO_DADDR(mp, fsbno);
	if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
			mp->m_bsize, lock, &bp))) {
		return error;
	}
	ASSERT(!bp || !XFS_BUF_GETERROR(bp));
	/* tag the buffer for the buffer cache's reclaim policy */
	if (bp)
		XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval);
	*bpp = bp;
	return 0;
}
/*
* Read-ahead the block, don't wait for it, don't return a buffer.
* Long-form addressing.
*/
/* ARGSUSED */
/*
 * Start readahead for the given filesystem block(s); does not wait
 * and returns no buffer. Long-form addressing.
 */
/* ARGSUSED */
void
xfs_btree_reada_bufl(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_fsblock_t	fsbno,		/* file system block number */
	xfs_extlen_t	count)		/* count of filesystem blocks */
{
	xfs_daddr_t	daddr;

	ASSERT(fsbno != NULLFSBLOCK);

	daddr = XFS_FSB_TO_DADDR(mp, fsbno);
	xfs_buf_readahead(mp->m_ddev_targp, daddr, mp->m_bsize * count);
}
/*
* Read-ahead the block, don't wait for it, don't return a buffer.
* Short-form addressing.
*/
/* ARGSUSED */
/*
 * Start readahead for the given AG block(s); does not wait and
 * returns no buffer. Short-form addressing.
 */
/* ARGSUSED */
void
xfs_btree_reada_bufs(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_agblock_t	agbno,		/* allocation group block number */
	xfs_extlen_t	count)		/* count of filesystem blocks */
{
	xfs_daddr_t	daddr;

	ASSERT(agno != NULLAGNUMBER);
	ASSERT(agbno != NULLAGBLOCK);

	daddr = XFS_AGB_TO_DADDR(mp, agno, agbno);
	xfs_buf_readahead(mp->m_ddev_targp, daddr, mp->m_bsize * count);
}
/*
 * Issue readahead for the requested siblings of a long-form block.
 * Returns the number of readaheads actually started.
 */
STATIC int
xfs_btree_readahead_lblock(
	struct xfs_btree_cur	*cur,
	int			lr,
	struct xfs_btree_block	*block)
{
	xfs_dfsbno_t		left = be64_to_cpu(block->bb_u.l.bb_leftsib);
	xfs_dfsbno_t		right = be64_to_cpu(block->bb_u.l.bb_rightsib);
	int			count = 0;

	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLDFSBNO) {
		xfs_btree_reada_bufl(cur->bc_mp, left, 1);
		count++;
	}

	if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLDFSBNO) {
		xfs_btree_reada_bufl(cur->bc_mp, right, 1);
		count++;
	}

	return count;
}
/*
 * Issue readahead for the requested siblings of a short-form block.
 * Returns the number of readaheads actually started.
 */
STATIC int
xfs_btree_readahead_sblock(
	struct xfs_btree_cur	*cur,
	int			lr,
	struct xfs_btree_block	*block)
{
	xfs_agblock_t		left = be32_to_cpu(block->bb_u.s.bb_leftsib);
	xfs_agblock_t		right = be32_to_cpu(block->bb_u.s.bb_rightsib);
	int			count = 0;

	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
		xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
				     left, 1);
		count++;
	}

	if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
		xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
				     right, 1);
		count++;
	}

	return count;
}
/*
* Read-ahead btree blocks, at the given level.
* Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA.
*/
STATIC int
xfs_btree_readahead(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			lev,		/* level in btree */
	int			lr)		/* left/right bits */
{
	struct xfs_btree_block	*block;

	/*
	 * No readahead needed if we are at the root level and the
	 * btree root is stored in the inode.
	 */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (lev == cur->bc_nlevels - 1))
		return 0;

	/* all requested directions already read ahead for this level? */
	if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev])
		return 0;

	cur->bc_ra[lev] |= lr;
	block = XFS_BUF_TO_BLOCK(cur->bc_bufs[lev]);

	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return xfs_btree_readahead_lblock(cur, lr, block);
	return xfs_btree_readahead_sblock(cur, lr, block);
}
/*
* Set the buffer for level "lev" in the cursor to bp, releasing
* any previous buffer.
*/
STATIC void
xfs_btree_setbuf(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			lev,	/* level in btree */
	xfs_buf_t		*bp)	/* new buffer to set */
{
	struct xfs_btree_block	*b;	/* btree block */

	/* drop any buffer previously held at this level */
	if (cur->bc_bufs[lev])
		xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[lev]);
	cur->bc_bufs[lev] = bp;
	cur->bc_ra[lev] = 0;

	/*
	 * Pre-mark readahead as done on any side with no sibling, so
	 * xfs_btree_readahead() never tries to read past the tree edge.
	 */
	b = XFS_BUF_TO_BLOCK(bp);
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (be64_to_cpu(b->bb_u.l.bb_leftsib) == NULLDFSBNO)
			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
		if (be64_to_cpu(b->bb_u.l.bb_rightsib) == NULLDFSBNO)
			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
	} else {
		if (be32_to_cpu(b->bb_u.s.bb_leftsib) == NULLAGBLOCK)
			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
		if (be32_to_cpu(b->bb_u.s.bb_rightsib) == NULLAGBLOCK)
			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
	}
}
/*
 * Is the given block pointer the null sentinel for this btree's
 * pointer size?
 */
STATIC int
xfs_btree_ptr_is_null(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return be64_to_cpu(ptr->l) == NULLDFSBNO;
	return be32_to_cpu(ptr->s) == NULLAGBLOCK;
}
/*
 * Store the null sentinel appropriate for this btree's pointer size.
 */
STATIC void
xfs_btree_set_ptr_null(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		ptr->l = cpu_to_be64(NULLDFSBNO);
	else
		ptr->s = cpu_to_be32(NULLAGBLOCK);
}
/*
* Get/set/init sibling pointers
*/
STATIC void
xfs_btree_get_sibling(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_ptr	*ptr,
	int			lr)
{
	int			want_right = (lr == XFS_BB_RIGHTSIB);

	ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);

	/* copy the raw on-disk sibling pointer, still big-endian */
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		ptr->l = want_right ? block->bb_u.l.bb_rightsib
				    : block->bb_u.l.bb_leftsib;
	else
		ptr->s = want_right ? block->bb_u.s.bb_rightsib
				    : block->bb_u.s.bb_leftsib;
}
STATIC void
xfs_btree_set_sibling(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_ptr	*ptr,
	int			lr)
{
	int			want_right = (lr == XFS_BB_RIGHTSIB);

	ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);

	/* store the raw pointer value; it is already big-endian */
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (want_right)
			block->bb_u.l.bb_rightsib = ptr->l;
		else
			block->bb_u.l.bb_leftsib = ptr->l;
	} else {
		if (want_right)
			block->bb_u.s.bb_rightsib = ptr->s;
		else
			block->bb_u.s.bb_leftsib = ptr->s;
	}
}
/*
 * Initialize the header of a freshly allocated btree block: magic,
 * level, record count, and null sibling links on both sides.
 */
STATIC void
xfs_btree_init_block(
	struct xfs_btree_cur	*cur,
	int			level,
	int			numrecs,
	struct xfs_btree_block	*new)	/* new block */
{
	new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]);
	new->bb_level = cpu_to_be16(level);
	new->bb_numrecs = cpu_to_be16(numrecs);

	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		new->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
		new->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
		return;
	}

	new->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
	new->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
}
/*
 * Return true if ptr is the last record in the btree and
 * we need to track updates to this record. The decision
 * will be further refined in the update_lastrec method.
 */
STATIC int
xfs_btree_is_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level)
{
	union xfs_btree_ptr	ptr;

	/* only leaf records matter for lastrec tracking */
	if (level > 0)
		return 0;
	if (!(cur->bc_flags & XFS_BTREE_LASTREC_UPDATE))
		return 0;

	/* last block of the tree iff there is no right sibling */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
	if (!xfs_btree_ptr_is_null(cur, &ptr))
		return 0;
	return 1;
}
/*
 * Convert the disk address of buffer @bp into an on-disk btree
 * pointer in @ptr (long fsblock or short agblock form).
 */
STATIC void
xfs_btree_buf_to_ptr(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	union xfs_btree_ptr	*ptr)
{
	if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS)) {
		ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp,
							XFS_BUF_ADDR(bp)));
	} else {
		ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp,
						      XFS_BUF_ADDR(bp)));
	}
}
/*
 * Convert an on-disk btree pointer into a disk (daddr) address.
 * The pointer must not be the null sentinel.
 */
STATIC xfs_daddr_t
xfs_btree_ptr_to_daddr(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS)) {
		/* Short pointers are relative to the cursor's AG. */
		ASSERT(cur->bc_private.a.agno != NULLAGNUMBER);
		ASSERT(be32_to_cpu(ptr->s) != NULLAGBLOCK);

		return XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno,
					be32_to_cpu(ptr->s));
	}

	/* Long pointers are absolute filesystem block numbers. */
	ASSERT(be64_to_cpu(ptr->l) != NULLDFSBNO);
	return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l));
}
STATIC void
xfs_btree_set_refs(
struct xfs_btree_cur *cur,
struct xfs_buf *bp)
{
switch (cur->bc_btnum) {
case XFS_BTNUM_BNO:
case XFS_BTNUM_CNT:
XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, XFS_ALLOC_BTREE_REF);
break;
case XFS_BTNUM_INO:
XFS_BUF_SET_VTYPE_REF(bp, B_FS_INOMAP, XFS_INO_BTREE_REF);
break;
case XFS_BTNUM_BMAP:
XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, XFS_BMAP_BTREE_REF);
break;
default:
ASSERT(0);
}
}
/*
 * Get a transaction-locked buffer for the block at @ptr without reading
 * it from disk (for blocks we are about to initialize), returning the
 * buffer in *bpp and the block header pointer in *blkp.
 * Always returns 0; allocation failure is only checked via ASSERT.
 */
STATIC int
xfs_btree_get_buf_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			flags,
	struct xfs_btree_block	**block,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_daddr_t		d;

	/* need to sort out how callers deal with failures first */
	ASSERT(!(flags & XBF_TRYLOCK));

	d = xfs_btree_ptr_to_daddr(cur, ptr);
	*bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,
				 mp->m_bsize, flags);

	ASSERT(*bpp);
	ASSERT(!XFS_BUF_GETERROR(*bpp));

	*block = XFS_BUF_TO_BLOCK(*bpp);
	return 0;
}
/*
 * Read in the buffer at the given ptr and return the buffer and
 * the block pointer within the buffer.
 *
 * On success the buffer holds a validity-checked btree block at @level;
 * on a failed block check the buffer is released before returning the
 * error, so callers never see a held-but-bad buffer.
 */
STATIC int
xfs_btree_read_buf_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			level,
	int			flags,
	struct xfs_btree_block	**block,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_daddr_t		d;
	int			error;

	/* need to sort out how callers deal with failures first */
	ASSERT(!(flags & XBF_TRYLOCK));

	d = xfs_btree_ptr_to_daddr(cur, ptr);
	error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d,
				   mp->m_bsize, flags, bpp);
	if (error)
		return error;

	ASSERT(*bpp != NULL);
	ASSERT(!XFS_BUF_GETERROR(*bpp));

	/* Tag the buffer so readahead/reclaim treat it appropriately. */
	xfs_btree_set_refs(cur, *bpp);
	*block = XFS_BUF_TO_BLOCK(*bpp);

	/* Validate header; drop the buffer if the block is corrupt. */
	error = xfs_btree_check_block(cur, *block, level, *bpp);
	if (error)
		xfs_trans_brelse(cur->bc_tp, *bpp);
	return error;
}
/*
 * Copy @numkeys key entries from @src_key to @dst_key.  The regions
 * must not overlap (use the shift helpers for in-block moves).
 */
STATIC void
xfs_btree_copy_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*dst_key,
	union xfs_btree_key	*src_key,
	int			numkeys)
{
	size_t			nbytes = numkeys * cur->bc_ops->key_len;

	ASSERT(numkeys >= 0);
	memcpy(dst_key, src_key, nbytes);
}
/*
 * Copy @numrecs record entries from @src_rec to @dst_rec.  The regions
 * must not overlap (use the shift helpers for in-block moves).
 */
STATIC void
xfs_btree_copy_recs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*dst_rec,
	union xfs_btree_rec	*src_rec,
	int			numrecs)
{
	size_t			nbytes = numrecs * cur->bc_ops->rec_len;

	ASSERT(numrecs >= 0);
	memcpy(dst_rec, src_rec, nbytes);
}
/*
 * Copy @numptrs block pointers from @src_ptr to @dst_ptr.  The regions
 * must not overlap (use the shift helpers for in-block moves).
 */
STATIC void
xfs_btree_copy_ptrs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*dst_ptr,
	union xfs_btree_ptr	*src_ptr,
	int			numptrs)
{
	size_t			nbytes = numptrs * xfs_btree_ptr_len(cur);

	ASSERT(numptrs >= 0);
	memcpy(dst_ptr, src_ptr, nbytes);
}
/*
 * Slide @numkeys key entries one slot left (@dir == -1) or right
 * (@dir == 1) within a single btree block.  memmove handles the
 * overlapping source/destination regions.
 */
STATIC void
xfs_btree_shift_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key,
	int			dir,
	int			numkeys)
{
	char			*dst;

	ASSERT(numkeys >= 0);
	ASSERT(dir == 1 || dir == -1);

	dst = (char *)key + dir * cur->bc_ops->key_len;
	memmove(dst, key, numkeys * cur->bc_ops->key_len);
}
/*
 * Slide @numrecs record entries one slot left (@dir == -1) or right
 * (@dir == 1) within a single btree block.  memmove handles the
 * overlapping source/destination regions.
 */
STATIC void
xfs_btree_shift_recs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec,
	int			dir,
	int			numrecs)
{
	char			*dst;

	ASSERT(numrecs >= 0);
	ASSERT(dir == 1 || dir == -1);

	dst = (char *)rec + dir * cur->bc_ops->rec_len;
	memmove(dst, rec, numrecs * cur->bc_ops->rec_len);
}
/*
 * Slide @numptrs block pointers one slot left (@dir == -1) or right
 * (@dir == 1) within a single btree block.  memmove handles the
 * overlapping source/destination regions.
 */
STATIC void
xfs_btree_shift_ptrs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			dir,
	int			numptrs)
{
	char			*dst;

	ASSERT(numptrs >= 0);
	ASSERT(dir == 1 || dir == -1);

	dst = (char *)ptr + dir * xfs_btree_ptr_len(cur);
	memmove(dst, ptr, numptrs * xfs_btree_ptr_len(cur));
}
/*
 * Log key values from the btree block.
 *
 * With a buffer (@bp != NULL) the byte range covering keys
 * [first, last] is logged in the transaction; for an inode-rooted
 * block (@bp == NULL) the whole inode fork root is logged instead.
 */
STATIC void
xfs_btree_log_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			first,
	int			last)
{
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);

	if (bp) {
		/* End offset is one byte before the key after @last. */
		xfs_trans_log_buf(cur->bc_tp, bp,
				  xfs_btree_key_offset(cur, first),
				  xfs_btree_key_offset(cur, last + 1) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
				xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}
/*
 * Log record values from the btree block.
 *
 * Records only exist in leaf blocks held in buffers, so unlike the
 * key/ptr variants there is no inode-root (@bp == NULL) case here.
 */
void
xfs_btree_log_recs(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			first,
	int			last)
{
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);

	/* End offset is one byte before the record after @last. */
	xfs_trans_log_buf(cur->bc_tp, bp,
			  xfs_btree_rec_offset(cur, first),
			  xfs_btree_rec_offset(cur, last + 1) - 1);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}
/*
 * Log block pointer fields from a btree block (nonleaf).
 *
 * Pointer offsets depend on the block's level (they sit after the
 * level's keys), so the level is read from the block header first.
 * For an inode-rooted block (@bp == NULL) the inode fork root is
 * logged instead.
 */
STATIC void
xfs_btree_log_ptrs(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer containing btree block */
	int			first,	/* index of first pointer to log */
	int			last)	/* index of last pointer to log */
{
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);

	if (bp) {
		struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
		int			level = xfs_btree_get_level(block);

		/* End offset is one byte before the pointer after @last. */
		xfs_trans_log_buf(cur->bc_tp, bp,
				xfs_btree_ptr_offset(cur, first, level),
				xfs_btree_ptr_offset(cur, last + 1, level) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
			xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}
/*
 * Log fields from a btree block header.
 *
 * @fields is a mask of XFS_BB_* bits; the offset tables below map each
 * bit to the byte offset of the corresponding header field, with a
 * trailing total-length entry so xfs_btree_offsets() can compute the
 * end of the last field.  Separate tables are needed because the
 * sibling pointers live at different offsets in the short and long
 * block formats.
 */
void
xfs_btree_log_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer containing btree block */
	int			fields)	/* mask of fields: XFS_BB_... */
{
	int			first;	/* first byte offset logged */
	int			last;	/* last byte offset logged */
	static const short	soffsets[] = {	/* table of offsets (short) */
		offsetof(struct xfs_btree_block, bb_magic),
		offsetof(struct xfs_btree_block, bb_level),
		offsetof(struct xfs_btree_block, bb_numrecs),
		offsetof(struct xfs_btree_block, bb_u.s.bb_leftsib),
		offsetof(struct xfs_btree_block, bb_u.s.bb_rightsib),
		XFS_BTREE_SBLOCK_LEN
	};
	static const short	loffsets[] = {	/* table of offsets (long) */
		offsetof(struct xfs_btree_block, bb_magic),
		offsetof(struct xfs_btree_block, bb_level),
		offsetof(struct xfs_btree_block, bb_numrecs),
		offsetof(struct xfs_btree_block, bb_u.l.bb_leftsib),
		offsetof(struct xfs_btree_block, bb_u.l.bb_rightsib),
		XFS_BTREE_LBLOCK_LEN
	};

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBI(cur, bp, fields);

	if (bp) {
		/* Translate the field mask to a [first, last] byte range. */
		xfs_btree_offsets(fields,
				  (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
					loffsets : soffsets,
				  XFS_BB_NUM_BITS, &first, &last);
		xfs_trans_log_buf(cur->bc_tp, bp, first, last);
	} else {
		/* Inode-rooted block: log the whole fork root. */
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
			xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}
/*
 * Increment cursor by one record at the level.
 * For nonzero levels the leaf-ward information is untouched.
 *
 * On return *stat is 1 if the cursor now points at the next record,
 * 0 if we ran off the right edge of the tree.  When a block boundary
 * is crossed, the cursor walks up until a block with a next entry is
 * found, then back down setting each lower level to its first entry.
 */
int						/* error */
xfs_btree_increment(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_block	*block;
	union xfs_btree_ptr	ptr;
	struct xfs_buf		*bp;
	int			error;		/* error return value */
	int			lev;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);
	ASSERT(level < cur->bc_nlevels);

	/* Read-ahead to the right at this level. */
	xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);

	/* Get a pointer to the btree block. */
	block = xfs_btree_get_block(cur, level, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* We're done if we remain in the block after the increment. */
	if (++cur->bc_ptrs[level] <= xfs_btree_get_numrecs(block))
		goto out1;

	/* Fail if we just went off the right edge of the tree. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
	if (xfs_btree_ptr_is_null(cur, &ptr))
		goto out0;

	XFS_BTREE_STATS_INC(cur, increment);

	/*
	 * March up the tree incrementing pointers.
	 * Stop when we don't go off the right edge of a block.
	 */
	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
		block = xfs_btree_get_block(cur, lev, &bp);

#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, lev, bp);
		if (error)
			goto error0;
#endif

		if (++cur->bc_ptrs[lev] <= xfs_btree_get_numrecs(block))
			break;

		/* Read-ahead the right block for the next loop. */
		xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
	}

	/*
	 * If we went off the root then we are either seriously
	 * confused or have the tree root in an inode.
	 */
	if (lev == cur->bc_nlevels) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			goto out0;
		ASSERT(0);
		error = EFSCORRUPTED;
		goto error0;
	}
	ASSERT(lev < cur->bc_nlevels);

	/*
	 * Now walk back down the tree, fixing up the cursor's buffer
	 * pointers and key numbers.  Each lower level starts at entry 1
	 * of the child block we just descended into.
	 */
	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
		union xfs_btree_ptr	*ptrp;

		ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
		error = xfs_btree_read_buf_block(cur, ptrp, --lev,
							0, &block, &bp);
		if (error)
			goto error0;

		xfs_btree_setbuf(cur, lev, bp);
		cur->bc_ptrs[lev] = 1;
	}
out1:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
/*
 * Decrement cursor by one record at the level.
 * For nonzero levels the leaf-ward information is untouched.
 *
 * On return *stat is 1 if the cursor now points at the previous record,
 * 0 if we ran off the left edge of the tree.  Mirror image of
 * xfs_btree_increment(): walk up until a block with a previous entry is
 * found, then back down setting each lower level to its last entry.
 */
int						/* error */
xfs_btree_decrement(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_block	*block;
	xfs_buf_t		*bp;
	int			error;		/* error return value */
	int			lev;
	union xfs_btree_ptr	ptr;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);
	ASSERT(level < cur->bc_nlevels);

	/* Read-ahead to the left at this level. */
	xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);

	/* We're done if we remain in the block after the decrement. */
	if (--cur->bc_ptrs[level] > 0)
		goto out1;

	/* Get a pointer to the btree block. */
	block = xfs_btree_get_block(cur, level, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* Fail if we just went off the left edge of the tree. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
	if (xfs_btree_ptr_is_null(cur, &ptr))
		goto out0;

	XFS_BTREE_STATS_INC(cur, decrement);

	/*
	 * March up the tree decrementing pointers.
	 * Stop when we don't go off the left edge of a block.
	 */
	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
		if (--cur->bc_ptrs[lev] > 0)
			break;
		/* Read-ahead the left block for the next loop. */
		xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
	}

	/*
	 * If we went off the root then we are either seriously
	 * confused or have the root of the tree in an inode.
	 */
	if (lev == cur->bc_nlevels) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			goto out0;
		ASSERT(0);
		error = EFSCORRUPTED;
		goto error0;
	}
	ASSERT(lev < cur->bc_nlevels);

	/*
	 * Now walk back down the tree, fixing up the cursor's buffer
	 * pointers and key numbers.  Each lower level starts at the last
	 * entry of the child block we just descended into.
	 */
	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
		union xfs_btree_ptr	*ptrp;

		ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
		error = xfs_btree_read_buf_block(cur, ptrp, --lev,
							0, &block, &bp);
		if (error)
			goto error0;
		xfs_btree_setbuf(cur, lev, bp);
		cur->bc_ptrs[lev] = xfs_btree_get_numrecs(block);
	}
out1:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
/*
 * Return in *blkp the btree block that the lookup at @level should
 * search, reading it from disk if necessary.  The inode-fork root and
 * an already-cached buffer at this cursor level are used without I/O.
 */
STATIC int					/* error */
xfs_btree_lookup_get_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level in the btree */
	union xfs_btree_ptr	*pp,	/* ptr to btree block */
	struct xfs_btree_block	**blkp) /* return btree block */
{
	struct xfs_buf		*bp;	/* buffer pointer for btree block */
	int			error = 0;

	/* special case the root block if in an inode */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level == cur->bc_nlevels - 1)) {
		*blkp = xfs_btree_get_iroot(cur);
		return 0;
	}

	/*
	 * If the old buffer at this level is for the disk address we are
	 * looking for, re-use it.
	 *
	 * Otherwise throw it away and get a new one.
	 */
	bp = cur->bc_bufs[level];
	if (bp && XFS_BUF_ADDR(bp) == xfs_btree_ptr_to_daddr(cur, pp)) {
		*blkp = XFS_BUF_TO_BLOCK(bp);
		return 0;
	}

	error = xfs_btree_read_buf_block(cur, pp, level, 0, blkp, &bp);
	if (error)
		return error;

	/* Replace the stale buffer at this level with the new one. */
	xfs_btree_setbuf(cur, level, bp);
	return 0;
}
/*
 * Return a pointer to the search key for entry @keyno of @block.
 *
 * Leaf blocks (level 0) store records rather than keys, so there we
 * synthesize a key from the record into caller-provided storage @kp;
 * at all other levels the in-block key is returned directly.
 */
STATIC union xfs_btree_key *
xfs_lookup_get_search_key(
	struct xfs_btree_cur	*cur,
	int			level,
	int			keyno,
	struct xfs_btree_block	*block,
	union xfs_btree_key	*kp)
{
	if (level != 0)
		return xfs_btree_key_addr(cur, keyno, block);

	cur->bc_ops->init_key_from_rec(kp,
			xfs_btree_rec_addr(cur, keyno, block));
	return kp;
}
/*
 * Lookup the record. The cursor is made to point to it, based on dir.
 * Return 0 if can't find any such record, 1 for success.
 *
 * Descends from the root doing a binary search at each level against
 * the key set up by the caller (compared via the key_diff method),
 * then adjusts the leaf position for <= / == / >= semantics.
 * *stat is 1 when a matching record was positioned, 0 otherwise.
 */
int					/* error */
xfs_btree_lookup(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_lookup_t		dir,	/* <=, ==, or >= */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;	/* current btree block */
	__int64_t		diff;	/* difference for the current key */
	int			error;	/* error return value */
	int			keyno;	/* current key number */
	int			level;	/* level in the btree */
	union xfs_btree_ptr	*pp;	/* ptr to btree block */
	union xfs_btree_ptr	ptr;	/* ptr to btree block */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, dir);

	XFS_BTREE_STATS_INC(cur, lookup);

	block = NULL;
	keyno = 0;

	/* initialise start pointer from cursor */
	cur->bc_ops->init_ptr_from_cur(cur, &ptr);
	pp = &ptr;

	/*
	 * Iterate over each level in the btree, starting at the root.
	 * For each level above the leaves, find the key we need, based
	 * on the lookup record, then follow the corresponding block
	 * pointer down to the next level.
	 */
	for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
		/* Get the block we need to do the lookup on. */
		error = xfs_btree_lookup_get_block(cur, level, pp, &block);
		if (error)
			goto error0;

		if (diff == 0) {
			/*
			 * If we already had a key match at a higher level, we
			 * know we need to use the first entry in this block.
			 */
			keyno = 1;
		} else {
			/* Otherwise search this block. Do a binary search. */

			int	high;	/* high entry number */
			int	low;	/* low entry number */

			/* Set low and high entry numbers, 1-based. */
			low = 1;
			high = xfs_btree_get_numrecs(block);
			if (!high) {
				/* Block is empty, must be an empty leaf. */
				ASSERT(level == 0 && cur->bc_nlevels == 1);

				/* LE lookup on empty leaf points before rec 1 */
				cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
				XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
				*stat = 0;
				return 0;
			}

			/* Binary search the block. */
			while (low <= high) {
				union xfs_btree_key	key;
				union xfs_btree_key	*kp;

				XFS_BTREE_STATS_INC(cur, compare);

				/* keyno is average of low and high. */
				keyno = (low + high) >> 1;

				/* Get current search key */
				kp = xfs_lookup_get_search_key(cur, level,
						keyno, block, &key);

				/*
				 * Compute difference to get next direction:
				 *  - less than, move right
				 *  - greater than, move left
				 *  - equal, we're done
				 */
				diff = cur->bc_ops->key_diff(cur, kp);
				if (diff < 0)
					low = keyno + 1;
				else if (diff > 0)
					high = keyno - 1;
				else
					break;
			}
		}

		/*
		 * If there are more levels, set up for the next level
		 * by getting the block number and filling in the cursor.
		 */
		if (level > 0) {
			/*
			 * If we moved left, need the previous key number,
			 * unless there isn't one.
			 */
			if (diff > 0 && --keyno < 1)
				keyno = 1;
			pp = xfs_btree_ptr_addr(cur, keyno, block);

#ifdef DEBUG
			error = xfs_btree_check_ptr(cur, pp, 0, level);
			if (error)
				goto error0;
#endif
			cur->bc_ptrs[level] = keyno;
		}
	}

	/* Done with the search. See if we need to adjust the results. */
	if (dir != XFS_LOOKUP_LE && diff < 0) {
		keyno++;
		/*
		 * If ge search and we went off the end of the block, but it's
		 * not the last block, we're in the wrong block.
		 */
		xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
		if (dir == XFS_LOOKUP_GE &&
		    keyno > xfs_btree_get_numrecs(block) &&
		    !xfs_btree_ptr_is_null(cur, &ptr)) {
			int	i;

			/* Step into the right sibling's first record. */
			cur->bc_ptrs[0] = keyno;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_RETURN(i == 1);
			XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
			*stat = 1;
			return 0;
		}
	} else if (dir == XFS_LOOKUP_LE && diff > 0)
		keyno--;
	cur->bc_ptrs[0] = keyno;

	/* Return if we succeeded or not. */
	if (keyno == 0 || keyno > xfs_btree_get_numrecs(block))
		*stat = 0;
	else if (dir != XFS_LOOKUP_EQ || diff == 0)
		*stat = 1;
	else
		*stat = 0;
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
/*
 * Update keys at all levels from here to the root along the cursor's path.
 *
 * Propagation stops at the first level where the cursor is not pointing
 * at entry 1, because a non-first entry's key does not appear in any
 * parent block.
 */
STATIC int
xfs_btree_updkey(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*keyp,
	int			level)
{
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	union xfs_btree_key	*kp;
	int			ptr;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGIK(cur, level, keyp);

	/* Inode-rooted trees have no keys at level 0. */
	ASSERT(!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) || level >= 1);

	/*
	 * Go up the tree from this level toward the root.
	 * At each level, update the key value to the value input.
	 * Stop when we reach a level where the cursor isn't pointing
	 * at the first entry in the block.
	 */
	for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
#ifdef DEBUG
		int		error;
#endif
		block = xfs_btree_get_block(cur, level, &bp);
#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, level, bp);
		if (error) {
			XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
			return error;
		}
#endif
		ptr = cur->bc_ptrs[level];
		kp = xfs_btree_key_addr(cur, ptr, block);
		xfs_btree_copy_keys(cur, kp, keyp, 1);
		xfs_btree_log_keys(cur, bp, ptr, ptr);
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;
}
/*
 * Update the record referred to by cur to the value in the
 * given record. This either works (return 0) or gets an
 * EFSCORRUPTED error.
 *
 * Also keeps the lastrec tracking and the parent keys consistent
 * when the updated record is the tree's last or the block's first.
 */
int
xfs_btree_update(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	int			error;
	int			ptr;
	union xfs_btree_rec	*rp;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGR(cur, rec);

	/* Pick up the current block. */
	block = xfs_btree_get_block(cur, 0, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, 0, bp);
	if (error)
		goto error0;
#endif
	/* Get the address of the rec to be updated. */
	ptr = cur->bc_ptrs[0];
	rp = xfs_btree_rec_addr(cur, ptr, block);

	/* Fill in the new contents and log them. */
	xfs_btree_copy_recs(cur, rp, rec, 1);
	xfs_btree_log_recs(cur, bp, ptr, ptr);

	/*
	 * If we are tracking the last record in the tree and
	 * we are at the far right edge of the tree, update it.
	 */
	if (xfs_btree_is_lastrec(cur, block, 0)) {
		cur->bc_ops->update_lastrec(cur, block, rec,
					    ptr, LASTREC_UPDATE);
	}

	/* Updating first rec in leaf. Pass new key value up to our parent. */
	if (ptr == 1) {
		union xfs_btree_key	key;

		cur->bc_ops->init_key_from_rec(&key, rec);
		error = xfs_btree_updkey(cur, &key, 1);
		if (error)
			goto error0;
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
/*
 * Move 1 record left from cur/level if possible.
 * Update cur to reflect the new path.
 *
 * The block the cursor is on acts as "right"; its left sibling receives
 * the first entry.  *stat is 1 if an entry moved, 0 if the shift could
 * not be done (no left sibling, left block full, or the cursor sits on
 * the entry that would move).
 */
STATIC int					/* error */
xfs_btree_lshift(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	union xfs_btree_key	key;		/* btree key */
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	int			lrecs;		/* left record count */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	int			rrecs;		/* right record count */
	union xfs_btree_ptr	lptr;		/* left btree pointer */
	union xfs_btree_key	*rkp = NULL;	/* right btree key */
	union xfs_btree_ptr	*rpp = NULL;	/* right address pointer */
	union xfs_btree_rec	*rrp = NULL;	/* right record pointer */
	int			error;		/* error return value */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	/* The inode root has no siblings to shift into. */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == cur->bc_nlevels - 1)
		goto out0;

	/* Set up variables for this block as "right". */
	right = xfs_btree_get_block(cur, level, &rbp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, right, level, rbp);
	if (error)
		goto error0;
#endif

	/* If we've got no left sibling then we can't shift an entry left. */
	xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
	if (xfs_btree_ptr_is_null(cur, &lptr))
		goto out0;

	/*
	 * If the cursor entry is the one that would be moved, don't
	 * do it... it's too complicated.
	 */
	if (cur->bc_ptrs[level] <= 1)
		goto out0;

	/* Set up the left neighbor as "left". */
	error = xfs_btree_read_buf_block(cur, &lptr, level, 0, &left, &lbp);
	if (error)
		goto error0;

	/* If it's full, it can't take another entry. */
	lrecs = xfs_btree_get_numrecs(left);
	if (lrecs == cur->bc_ops->get_maxrecs(cur, level))
		goto out0;

	rrecs = xfs_btree_get_numrecs(right);

	/*
	 * We add one entry to the left side and remove one for the right side.
	 * Account for it here, the changes will be updated on disk and logged
	 * later.
	 */
	lrecs++;
	rrecs--;

	XFS_BTREE_STATS_INC(cur, lshift);
	XFS_BTREE_STATS_ADD(cur, moves, 1);

	/*
	 * If non-leaf, copy a key and a ptr to the left block.
	 * Log the changes to the left block.
	 */
	if (level > 0) {
		/* It's a non-leaf.  Move keys and pointers. */
		union xfs_btree_key	*lkp;	/* left btree key */
		union xfs_btree_ptr	*lpp;	/* left address pointer */

		lkp = xfs_btree_key_addr(cur, lrecs, left);
		rkp = xfs_btree_key_addr(cur, 1, right);

		lpp = xfs_btree_ptr_addr(cur, lrecs, left);
		rpp = xfs_btree_ptr_addr(cur, 1, right);
#ifdef DEBUG
		error = xfs_btree_check_ptr(cur, rpp, 0, level);
		if (error)
			goto error0;
#endif
		xfs_btree_copy_keys(cur, lkp, rkp, 1);
		xfs_btree_copy_ptrs(cur, lpp, rpp, 1);

		xfs_btree_log_keys(cur, lbp, lrecs, lrecs);
		xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs);

		ASSERT(cur->bc_ops->keys_inorder(cur,
			xfs_btree_key_addr(cur, lrecs - 1, left), lkp));
	} else {
		/* It's a leaf.  Move records. */
		union xfs_btree_rec	*lrp;	/* left record pointer */

		lrp = xfs_btree_rec_addr(cur, lrecs, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		xfs_btree_copy_recs(cur, lrp, rrp, 1);
		xfs_btree_log_recs(cur, lbp, lrecs, lrecs);

		ASSERT(cur->bc_ops->recs_inorder(cur,
			xfs_btree_rec_addr(cur, lrecs - 1, left), lrp));
	}

	xfs_btree_set_numrecs(left, lrecs);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);

	xfs_btree_set_numrecs(right, rrecs);
	xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);

	/*
	 * Slide the contents of right down one entry.
	 */
	XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1);
	if (level > 0) {
		/* It's a nonleaf. operate on keys and ptrs */
#ifdef DEBUG
		int			i;	/* loop index */

		for (i = 0; i < rrecs; i++) {
			error = xfs_btree_check_ptr(cur, rpp, i + 1, level);
			if (error)
				goto error0;
		}
#endif
		xfs_btree_shift_keys(cur,
				xfs_btree_key_addr(cur, 2, right),
				-1, rrecs);
		xfs_btree_shift_ptrs(cur,
				xfs_btree_ptr_addr(cur, 2, right),
				-1, rrecs);

		xfs_btree_log_keys(cur, rbp, 1, rrecs);
		xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
	} else {
		/* It's a leaf. operate on records */
		xfs_btree_shift_recs(cur,
			xfs_btree_rec_addr(cur, 2, right),
			-1, rrecs);
		xfs_btree_log_recs(cur, rbp, 1, rrecs);

		/*
		 * If it's the first record in the block, we'll need a key
		 * structure to pass up to the next level (updkey).
		 */
		cur->bc_ops->init_key_from_rec(&key,
			xfs_btree_rec_addr(cur, 1, right));
		rkp = &key;
	}

	/* Update the parent key values of right. */
	error = xfs_btree_updkey(cur, rkp, level + 1);
	if (error)
		goto error0;

	/* Slide the cursor value left one. */
	cur->bc_ptrs[level]--;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
/*
 * Move 1 record right from cur/level if possible.
 * Update cur to reflect the new path.
 *
 * The block the cursor is on acts as "left"; its right sibling receives
 * the last entry at its front.  *stat is 1 if an entry moved, 0 if the
 * shift could not be done.  A duplicated cursor is used to update the
 * right sibling's parent key without disturbing this cursor's path.
 */
STATIC int					/* error */
xfs_btree_rshift(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	union xfs_btree_key	key;		/* btree key */
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	struct xfs_btree_cur	*tcur;		/* temporary btree cursor */
	union xfs_btree_ptr	rptr;		/* right block pointer */
	union xfs_btree_key	*rkp;		/* right btree key */
	int			rrecs;		/* right record count */
	int			lrecs;		/* left record count */
	int			error;		/* error return value */
	int			i;		/* loop counter */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	/* The inode root has no siblings to shift into. */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level == cur->bc_nlevels - 1))
		goto out0;

	/* Set up variables for this block as "left". */
	left = xfs_btree_get_block(cur, level, &lbp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, left, level, lbp);
	if (error)
		goto error0;
#endif

	/* If we've got no right sibling then we can't shift an entry right. */
	xfs_btree_get_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
	if (xfs_btree_ptr_is_null(cur, &rptr))
		goto out0;

	/*
	 * If the cursor entry is the one that would be moved, don't
	 * do it... it's too complicated.
	 */
	lrecs = xfs_btree_get_numrecs(left);
	if (cur->bc_ptrs[level] >= lrecs)
		goto out0;

	/* Set up the right neighbor as "right". */
	error = xfs_btree_read_buf_block(cur, &rptr, level, 0, &right, &rbp);
	if (error)
		goto error0;

	/* If it's full, it can't take another entry. */
	rrecs = xfs_btree_get_numrecs(right);
	if (rrecs == cur->bc_ops->get_maxrecs(cur, level))
		goto out0;

	XFS_BTREE_STATS_INC(cur, rshift);
	XFS_BTREE_STATS_ADD(cur, moves, rrecs);

	/*
	 * Make a hole at the start of the right neighbor block, then
	 * copy the last left block entry to the hole.
	 */
	if (level > 0) {
		/* It's a nonleaf. make a hole in the keys and ptrs */
		union xfs_btree_key	*lkp;
		union xfs_btree_ptr	*lpp;
		union xfs_btree_ptr	*rpp;

		lkp = xfs_btree_key_addr(cur, lrecs, left);
		lpp = xfs_btree_ptr_addr(cur, lrecs, left);
		rkp = xfs_btree_key_addr(cur, 1, right);
		rpp = xfs_btree_ptr_addr(cur, 1, right);

#ifdef DEBUG
		for (i = rrecs - 1; i >= 0; i--) {
			error = xfs_btree_check_ptr(cur, rpp, i, level);
			if (error)
				goto error0;
		}
#endif

		xfs_btree_shift_keys(cur, rkp, 1, rrecs);
		xfs_btree_shift_ptrs(cur, rpp, 1, rrecs);

#ifdef DEBUG
		error = xfs_btree_check_ptr(cur, lpp, 0, level);
		if (error)
			goto error0;
#endif

		/* Now put the new data in, and log it. */
		xfs_btree_copy_keys(cur, rkp, lkp, 1);
		xfs_btree_copy_ptrs(cur, rpp, lpp, 1);

		xfs_btree_log_keys(cur, rbp, 1, rrecs + 1);
		xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1);

		ASSERT(cur->bc_ops->keys_inorder(cur, rkp,
			xfs_btree_key_addr(cur, 2, right)));
	} else {
		/* It's a leaf. make a hole in the records */
		union xfs_btree_rec	*lrp;
		union xfs_btree_rec	*rrp;

		lrp = xfs_btree_rec_addr(cur, lrecs, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		xfs_btree_shift_recs(cur, rrp, 1, rrecs);

		/* Now put the new data in, and log it. */
		xfs_btree_copy_recs(cur, rrp, lrp, 1);
		xfs_btree_log_recs(cur, rbp, 1, rrecs + 1);

		/* Build the key for the moved record for updkey below. */
		cur->bc_ops->init_key_from_rec(&key, rrp);
		rkp = &key;

		ASSERT(cur->bc_ops->recs_inorder(cur, rrp,
			xfs_btree_rec_addr(cur, 2, right)));
	}

	/*
	 * Decrement and log left's numrecs, bump and log right's numrecs.
	 */
	xfs_btree_set_numrecs(left, --lrecs);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);

	xfs_btree_set_numrecs(right, ++rrecs);
	xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);

	/*
	 * Using a temporary cursor, update the parent key values of the
	 * block on the right.
	 */
	error = xfs_btree_dup_cursor(cur, &tcur);
	if (error)
		goto error0;
	i = xfs_btree_lastrec(tcur, level);
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);

	error = xfs_btree_increment(tcur, level, &i);
	if (error)
		goto error1;

	error = xfs_btree_updkey(tcur, rkp, level + 1);
	if (error)
		goto error1;

	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;

error1:
	/* The temporary cursor holds buffers; tear it down on error. */
	XFS_BTREE_TRACE_CURSOR(tcur, XBT_ERROR);
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
	return error;
}
/*
* Split cur/level block in half.
* Return new block number and the key to its first
* record (to be inserted into parent).
*/
STATIC int /* error */
xfs_btree_split(
struct xfs_btree_cur *cur,
int level,
union xfs_btree_ptr *ptrp,
union xfs_btree_key *key,
struct xfs_btree_cur **curp,
int *stat) /* success/failure */
{
union xfs_btree_ptr lptr; /* left sibling block ptr */
struct xfs_buf *lbp; /* left buffer pointer */
struct xfs_btree_block *left; /* left btree block */
union xfs_btree_ptr rptr; /* right sibling block ptr */
struct xfs_buf *rbp; /* right buffer pointer */
struct xfs_btree_block *right; /* right btree block */
union xfs_btree_ptr rrptr; /* right-right sibling ptr */
struct xfs_buf *rrbp; /* right-right buffer pointer */
struct xfs_btree_block *rrblock; /* right-right btree block */
int lrecs;
int rrecs;
int src_index;
int error; /* error return value */
#ifdef DEBUG
int i;
#endif
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGIPK(cur, level, *ptrp, key);
XFS_BTREE_STATS_INC(cur, split);
/* Set up left block (current one). */
left = xfs_btree_get_block(cur, level, &lbp);
#ifdef DEBUG
error = xfs_btree_check_block(cur, left, level, lbp);
if (error)
goto error0;
#endif
xfs_btree_buf_to_ptr(cur, lbp, &lptr);
/* Allocate the new block. If we can't do it, we're toast. Give up. */
error = cur->bc_ops->alloc_block(cur, &lptr, &rptr, 1, stat);
if (error)
goto error0;
if (*stat == 0)
goto out0;
XFS_BTREE_STATS_INC(cur, alloc);
/* Set up the new block as "right". */
error = xfs_btree_get_buf_block(cur, &rptr, 0, &right, &rbp);
if (error)
goto error0;
/* Fill in the btree header for the new right block. */
xfs_btree_init_block(cur, xfs_btree_get_level(left), 0, right);
/*
* Split the entries between the old and the new block evenly.
* Make sure that if there's an odd number of entries now, that
* each new block will have the same number of entries.
*/
lrecs = xfs_btree_get_numrecs(left);
rrecs = lrecs / 2;
if ((lrecs & 1) && cur->bc_ptrs[level] <= rrecs + 1)
rrecs++;
src_index = (lrecs - rrecs + 1);
XFS_BTREE_STATS_ADD(cur, moves, rrecs);
/*
* Copy btree block entries from the left block over to the
* new block, the right. Update the right block and log the
* changes.
*/
if (level > 0) {
/* It's a non-leaf. Move keys and pointers. */
union xfs_btree_key *lkp; /* left btree key */
union xfs_btree_ptr *lpp; /* left address pointer */
union xfs_btree_key *rkp; /* right btree key */
union xfs_btree_ptr *rpp; /* right address pointer */
lkp = xfs_btree_key_addr(cur, src_index, left);
lpp = xfs_btree_ptr_addr(cur, src_index, left);
rkp = xfs_btree_key_addr(cur, 1, right);
rpp = xfs_btree_ptr_addr(cur, 1, right);
#ifdef DEBUG
for (i = src_index; i < rrecs; i++) {
error = xfs_btree_check_ptr(cur, lpp, i, level);
if (error)
goto error0;
}
#endif
xfs_btree_copy_keys(cur, rkp, lkp, rrecs);
xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs);
xfs_btree_log_keys(cur, rbp, 1, rrecs);
xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
/* Grab the keys to the entries moved to the right block */
xfs_btree_copy_keys(cur, key, rkp, 1);
} else {
/* It's a leaf. Move records. */
union xfs_btree_rec *lrp; /* left record pointer */
union xfs_btree_rec *rrp; /* right record pointer */
lrp = xfs_btree_rec_addr(cur, src_index, left);
rrp = xfs_btree_rec_addr(cur, 1, right);
xfs_btree_copy_recs(cur, rrp, lrp, rrecs);
xfs_btree_log_recs(cur, rbp, 1, rrecs);
cur->bc_ops->init_key_from_rec(key,
xfs_btree_rec_addr(cur, 1, right));
}
/*
* Find the left block number by looking in the buffer.
* Adjust numrecs, sibling pointers.
*/
xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB);
xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB);
xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
lrecs -= rrecs;
xfs_btree_set_numrecs(left, lrecs);
xfs_btree_set_numrecs(right, xfs_btree_get_numrecs(right) + rrecs);
xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS);
xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
/*
* If there's a block to the new block's right, make that block
* point back to right instead of to left.
*/
if (!xfs_btree_ptr_is_null(cur, &rrptr)) {
error = xfs_btree_read_buf_block(cur, &rrptr, level,
0, &rrblock, &rrbp);
if (error)
goto error0;
xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB);
xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
}
/*
* If the cursor is really in the right block, move it there.
* If it's just pointing past the last entry in left, then we'll
* insert there, so don't change anything in that case.
*/
if (cur->bc_ptrs[level] > lrecs + 1) {
xfs_btree_setbuf(cur, level, rbp);
cur->bc_ptrs[level] -= lrecs;
}
/*
* If there are more levels, we'll need another cursor which refers
* the right block, no matter where this cursor was.
*/
if (level + 1 < cur->bc_nlevels) {
error = xfs_btree_dup_cursor(cur, curp);
if (error)
goto error0;
(*curp)->bc_ptrs[level + 1]++;
}
*ptrp = rptr;
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 1;
return 0;
out0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 0;
return 0;
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
}
/*
 * Copy the old inode root contents into a real block and make the
 * broot point to it.
 *
 * Used for inode-rooted btrees when the incore root has filled up:
 * the root's keys/ptrs are moved into a freshly allocated child block
 * and the incore root is shrunk to a single key/ptr entry pointing at
 * that child, which adds one level to the tree.  On return *stat is 1
 * on success, 0 if block allocation failed (not an error); *logflags
 * accumulates the inode log flags the caller must log.
 */
int /* error */
xfs_btree_new_iroot(
struct xfs_btree_cur *cur, /* btree cursor */
int *logflags, /* logging flags for inode */
int *stat) /* return status - 0 fail */
{
struct xfs_buf *cbp; /* buffer for cblock */
struct xfs_btree_block *block; /* btree block */
struct xfs_btree_block *cblock; /* child btree block */
union xfs_btree_key *ckp; /* child key pointer */
union xfs_btree_ptr *cpp; /* child ptr pointer */
union xfs_btree_key *kp; /* pointer to btree key */
union xfs_btree_ptr *pp; /* pointer to block addr */
union xfs_btree_ptr nptr; /* new block addr */
int level; /* btree level */
int error; /* error return code */
#ifdef DEBUG
int i; /* loop counter */
#endif
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_STATS_INC(cur, newroot);
/* Only valid for btrees whose root lives in the inode fork. */
ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
level = cur->bc_nlevels - 1;
block = xfs_btree_get_iroot(cur);
pp = xfs_btree_ptr_addr(cur, 1, block);
/* Allocate the new block. If we can't do it, we're toast. Give up. */
error = cur->bc_ops->alloc_block(cur, pp, &nptr, 1, stat);
if (error)
goto error0;
if (*stat == 0) {
/* Allocation failed; caller sees *stat == 0, no error. */
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
return 0;
}
XFS_BTREE_STATS_INC(cur, alloc);
/* Copy the root into a real block. */
error = xfs_btree_get_buf_block(cur, &nptr, 0, &cblock, &cbp);
if (error)
goto error0;
memcpy(cblock, block, xfs_btree_block_len(cur));
/*
 * The tree grew a level: the (incore) root now holds exactly one
 * entry pointing at the child, and the cursor gains a level.
 */
be16_add_cpu(&block->bb_level, 1);
xfs_btree_set_numrecs(block, 1);
cur->bc_nlevels++;
cur->bc_ptrs[level + 1] = 1;
kp = xfs_btree_key_addr(cur, 1, block);
ckp = xfs_btree_key_addr(cur, 1, cblock);
xfs_btree_copy_keys(cur, ckp, kp, xfs_btree_get_numrecs(cblock));
cpp = xfs_btree_ptr_addr(cur, 1, cblock);
#ifdef DEBUG
for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
error = xfs_btree_check_ptr(cur, pp, i, level);
if (error)
goto error0;
}
#endif
xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock));
#ifdef DEBUG
error = xfs_btree_check_ptr(cur, &nptr, 0, level);
if (error)
goto error0;
#endif
/* Point the shrunken root's single ptr at the new child block. */
xfs_btree_copy_ptrs(cur, pp, &nptr, 1);
/* Shrink the incore root down to its one remaining record. */
xfs_iroot_realloc(cur->bc_private.b.ip,
1 - xfs_btree_get_numrecs(cblock),
cur->bc_private.b.whichfork);
xfs_btree_setbuf(cur, level, cbp);
/*
 * Do all this logging at the end so that
 * the root is at the right level.
 */
xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS);
xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
*logflags |=
XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork);
*stat = 1;
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
return 0;
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
}
/*
 * Allocate a new root block, fill it in.
 *
 * Called (for externally-rooted btrees) after the old root block was
 * split: the new root gets exactly two entries, one for the old root
 * and one for its new right sibling.  *stat is 1 on success, 0 if the
 * block allocation failed.
 */
STATIC int /* error */
xfs_btree_new_root(
struct xfs_btree_cur *cur, /* btree cursor */
int *stat) /* success/failure */
{
struct xfs_btree_block *block; /* one half of the old root block */
struct xfs_buf *bp; /* buffer containing block */
int error; /* error return value */
struct xfs_buf *lbp; /* left buffer pointer */
struct xfs_btree_block *left; /* left btree block */
struct xfs_buf *nbp; /* new (root) buffer */
struct xfs_btree_block *new; /* new (root) btree block */
int nptr; /* new value for key index, 1 or 2 */
struct xfs_buf *rbp; /* right buffer pointer */
struct xfs_btree_block *right; /* right btree block */
union xfs_btree_ptr rptr;
union xfs_btree_ptr lptr;
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_STATS_INC(cur, newroot);
/* initialise our start point from the cursor */
cur->bc_ops->init_ptr_from_cur(cur, &rptr);
/* Allocate the new block. If we can't do it, we're toast. Give up. */
error = cur->bc_ops->alloc_block(cur, &rptr, &lptr, 1, stat);
if (error)
goto error0;
if (*stat == 0)
goto out0;
XFS_BTREE_STATS_INC(cur, alloc);
/* Set up the new block. */
error = xfs_btree_get_buf_block(cur, &lptr, 0, &new, &nbp);
if (error)
goto error0;
/* Set the root in the holding structure increasing the level by 1. */
cur->bc_ops->set_root(cur, &lptr, 1);
/*
 * At the previous root level there are now two blocks: the old root,
 * and the new block generated when it was split. We don't know which
 * one the cursor is pointing at, so we set up variables "left" and
 * "right" for each case.
 */
block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp);
#ifdef DEBUG
error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp);
if (error)
goto error0;
#endif
xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
if (!xfs_btree_ptr_is_null(cur, &rptr)) {
/* Our block is left, pick up the right block. */
lbp = bp;
xfs_btree_buf_to_ptr(cur, lbp, &lptr);
left = block;
error = xfs_btree_read_buf_block(cur, &rptr,
cur->bc_nlevels - 1, 0, &right, &rbp);
if (error)
goto error0;
bp = rbp;
/* Cursor's block is slot 1 of the new root. */
nptr = 1;
} else {
/* Our block is right, pick up the left block. */
rbp = bp;
xfs_btree_buf_to_ptr(cur, rbp, &rptr);
right = block;
xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
error = xfs_btree_read_buf_block(cur, &lptr,
cur->bc_nlevels - 1, 0, &left, &lbp);
if (error)
goto error0;
bp = lbp;
/* Cursor's block is slot 2 of the new root. */
nptr = 2;
}
/* Fill in the new block's btree header and log it. */
xfs_btree_init_block(cur, cur->bc_nlevels, 2, new);
xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS);
ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) &&
!xfs_btree_ptr_is_null(cur, &rptr));
/* Fill in the key data in the new root. */
if (xfs_btree_get_level(left) > 0) {
/* Non-leaf children: lift their first keys into the root. */
xfs_btree_copy_keys(cur,
xfs_btree_key_addr(cur, 1, new),
xfs_btree_key_addr(cur, 1, left), 1);
xfs_btree_copy_keys(cur,
xfs_btree_key_addr(cur, 2, new),
xfs_btree_key_addr(cur, 1, right), 1);
} else {
/* Leaf children: derive the keys from their first records. */
cur->bc_ops->init_key_from_rec(
xfs_btree_key_addr(cur, 1, new),
xfs_btree_rec_addr(cur, 1, left));
cur->bc_ops->init_key_from_rec(
xfs_btree_key_addr(cur, 2, new),
xfs_btree_rec_addr(cur, 1, right));
}
xfs_btree_log_keys(cur, nbp, 1, 2);
/* Fill in the pointer data in the new root. */
xfs_btree_copy_ptrs(cur,
xfs_btree_ptr_addr(cur, 1, new), &lptr, 1);
xfs_btree_copy_ptrs(cur,
xfs_btree_ptr_addr(cur, 2, new), &rptr, 1);
xfs_btree_log_ptrs(cur, nbp, 1, 2);
/* Fix up the cursor. */
xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
cur->bc_ptrs[cur->bc_nlevels] = nptr;
cur->bc_nlevels++;
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 1;
return 0;
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
out0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 0;
return 0;
}
/*
 * Make room in the block at the given level for one more entry.
 *
 * Tries, in order: growing an incore inode root, replacing the inode
 * root with a real block (xfs_btree_new_iroot), shifting an entry to
 * the right or left sibling, and finally splitting the block.  On
 * return *stat is 1 if room was made, 0 otherwise.  After a split,
 * *nptr/*ncur/*nrec hold the new block pointer, cursor and record
 * the caller must use to continue the insert one level up, and
 * *index (/*oindex on an lshift) holds the updated insert position.
 */
STATIC int
xfs_btree_make_block_unfull(
struct xfs_btree_cur *cur, /* btree cursor */
int level, /* btree level */
int numrecs,/* # of recs in block */
int *oindex,/* old tree index */
int *index, /* new tree index */
union xfs_btree_ptr *nptr, /* new btree ptr */
struct xfs_btree_cur **ncur, /* new btree cursor */
union xfs_btree_rec *nrec, /* new record */
int *stat)
{
union xfs_btree_key key; /* new btree key value */
int error = 0;
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
level == cur->bc_nlevels - 1) {
struct xfs_inode *ip = cur->bc_private.b.ip;
if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
/* A root block that can be made bigger. */
xfs_iroot_realloc(ip, 1, cur->bc_private.b.whichfork);
} else {
/* A root block that needs replacing */
int logflags = 0;
error = xfs_btree_new_iroot(cur, &logflags, stat);
if (error || *stat == 0)
return error;
xfs_trans_log_inode(cur->bc_tp, ip, logflags);
}
return 0;
}
/* First, try shifting an entry to the right neighbor. */
error = xfs_btree_rshift(cur, level, stat);
if (error || *stat)
return error;
/* Next, try shifting an entry to the left neighbor. */
error = xfs_btree_lshift(cur, level, stat);
if (error)
return error;
if (*stat) {
/* Left shift moved the insert position; report it back. */
*oindex = *index = cur->bc_ptrs[level];
return 0;
}
/*
 * Next, try splitting the current block in half.
 *
 * If this works we have to re-set our variables because we
 * could be in a different block now.
 */
error = xfs_btree_split(cur, level, nptr, &key, ncur, stat);
if (error || *stat == 0)
return error;
*index = cur->bc_ptrs[level];
/* Hand the split key back in record form for the next level's insert. */
cur->bc_ops->init_rec_from_key(&key, nrec);
return 0;
}
/*
 * Insert one record/level. Return information to the caller
 * allowing the next level up to proceed if necessary.
 *
 * On return *stat is 1 on success, 0 on failure (cursor off the left
 * edge, or no room could be made).  If the block had to be split,
 * *ptrp/*recp are updated with the new block and record the caller
 * must insert at level + 1, and *curp may return a new cursor that
 * replaces cur for the remainder of the insert.
 */
STATIC int
xfs_btree_insrec(
struct xfs_btree_cur *cur, /* btree cursor */
int level, /* level to insert record at */
union xfs_btree_ptr *ptrp, /* i/o: block number inserted */
union xfs_btree_rec *recp, /* i/o: record data inserted */
struct xfs_btree_cur **curp, /* output: new cursor replacing cur */
int *stat) /* success/failure */
{
struct xfs_btree_block *block; /* btree block */
struct xfs_buf *bp; /* buffer for block */
union xfs_btree_key key; /* btree key */
union xfs_btree_ptr nptr; /* new block ptr */
struct xfs_btree_cur *ncur; /* new btree cursor */
union xfs_btree_rec nrec; /* new record count */
int optr; /* old key/record index */
int ptr; /* key/record index */
int numrecs;/* number of records */
int error; /* error return value */
#ifdef DEBUG
int i;
#endif
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGIPR(cur, level, *ptrp, recp);
/* No new cursor unless a split below produces one. */
ncur = NULL;
/*
 * If we have an external root pointer, and we've made it to the
 * root level, allocate a new root block and we're done.
 */
if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
(level >= cur->bc_nlevels)) {
error = xfs_btree_new_root(cur, stat);
xfs_btree_set_ptr_null(cur, ptrp);
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
return error;
}
/* If we're off the left edge, return failure. */
ptr = cur->bc_ptrs[level];
if (ptr == 0) {
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 0;
return 0;
}
/* Make a key out of the record data to be inserted, and save it. */
cur->bc_ops->init_key_from_rec(&key, recp);
/* Remember the original insert index for the parent-key update below. */
optr = ptr;
XFS_BTREE_STATS_INC(cur, insrec);
/* Get pointers to the btree buffer and block. */
block = xfs_btree_get_block(cur, level, &bp);
numrecs = xfs_btree_get_numrecs(block);
#ifdef DEBUG
error = xfs_btree_check_block(cur, block, level, bp);
if (error)
goto error0;
/* Check that the new entry is being inserted in the right place. */
if (ptr <= numrecs) {
if (level == 0) {
ASSERT(cur->bc_ops->recs_inorder(cur, recp,
xfs_btree_rec_addr(cur, ptr, block)));
} else {
ASSERT(cur->bc_ops->keys_inorder(cur, &key,
xfs_btree_key_addr(cur, ptr, block)));
}
}
#endif
/*
 * If the block is full, we can't insert the new entry until we
 * make the block un-full.
 */
xfs_btree_set_ptr_null(cur, &nptr);
if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) {
error = xfs_btree_make_block_unfull(cur, level, numrecs,
&optr, &ptr, &nptr, &ncur, &nrec, stat);
if (error || *stat == 0)
goto error0;
}
/*
 * The current block may have changed if the block was
 * previously full and we have just made space in it.
 */
block = xfs_btree_get_block(cur, level, &bp);
numrecs = xfs_btree_get_numrecs(block);
#ifdef DEBUG
error = xfs_btree_check_block(cur, block, level, bp);
if (error)
return error;
#endif
/*
 * At this point we know there's room for our new entry in the block
 * we're pointing at.
 */
XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1);
if (level > 0) {
/* It's a nonleaf. make a hole in the keys and ptrs */
union xfs_btree_key *kp;
union xfs_btree_ptr *pp;
kp = xfs_btree_key_addr(cur, ptr, block);
pp = xfs_btree_ptr_addr(cur, ptr, block);
#ifdef DEBUG
for (i = numrecs - ptr; i >= 0; i--) {
error = xfs_btree_check_ptr(cur, pp, i, level);
if (error)
return error;
}
#endif
xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1);
#ifdef DEBUG
error = xfs_btree_check_ptr(cur, ptrp, 0, level);
if (error)
goto error0;
#endif
/* Now put the new data in, bump numrecs and log it. */
xfs_btree_copy_keys(cur, kp, &key, 1);
xfs_btree_copy_ptrs(cur, pp, ptrp, 1);
numrecs++;
xfs_btree_set_numrecs(block, numrecs);
xfs_btree_log_ptrs(cur, bp, ptr, numrecs);
xfs_btree_log_keys(cur, bp, ptr, numrecs);
#ifdef DEBUG
if (ptr < numrecs) {
ASSERT(cur->bc_ops->keys_inorder(cur, kp,
xfs_btree_key_addr(cur, ptr + 1, block)));
}
#endif
} else {
/* It's a leaf. make a hole in the records */
union xfs_btree_rec *rp;
rp = xfs_btree_rec_addr(cur, ptr, block);
xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1);
/* Now put the new data in, bump numrecs and log it. */
xfs_btree_copy_recs(cur, rp, recp, 1);
xfs_btree_set_numrecs(block, ++numrecs);
xfs_btree_log_recs(cur, bp, ptr, numrecs);
#ifdef DEBUG
if (ptr < numrecs) {
ASSERT(cur->bc_ops->recs_inorder(cur, rp,
xfs_btree_rec_addr(cur, ptr + 1, block)));
}
#endif
}
/* Log the new number of records in the btree header. */
xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
/* If we inserted at the start of a block, update the parents' keys. */
if (optr == 1) {
error = xfs_btree_updkey(cur, &key, level + 1);
if (error)
goto error0;
}
/*
 * If we are tracking the last record in the tree and
 * we are at the far right edge of the tree, update it.
 */
if (xfs_btree_is_lastrec(cur, block, level)) {
cur->bc_ops->update_lastrec(cur, block, recp,
ptr, LASTREC_INSREC);
}
/*
 * Return the new block number, if any.
 * If there is one, give back a record value and a cursor too.
 */
*ptrp = nptr;
if (!xfs_btree_ptr_is_null(cur, &nptr)) {
*recp = nrec;
*curp = ncur;
}
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 1;
return 0;
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
}
/*
 * Insert the record at the point referenced by cur.
 *
 * A multi-level split of the tree on insert will invalidate the original
 * cursor. All callers of this function should assume that the cursor is
 * no longer valid and revalidate it.
 *
 * Walks up the tree one level at a time via xfs_btree_insrec(), feeding
 * each split's new block/record into the next level, until no further
 * split is produced.  *stat is 1 on success, 0 on failure.
 */
int
xfs_btree_insert(
struct xfs_btree_cur *cur,
int *stat)
{
int error; /* error return value */
int i; /* result value, 0 for failure */
int level; /* current level number in btree */
union xfs_btree_ptr nptr; /* new block number (split result) */
struct xfs_btree_cur *ncur; /* new cursor (split result) */
struct xfs_btree_cur *pcur; /* previous level's cursor */
union xfs_btree_rec rec; /* record to insert */
level = 0;
ncur = NULL;
pcur = cur;
xfs_btree_set_ptr_null(cur, &nptr);
/* Build the record to insert from the cursor's current position. */
cur->bc_ops->init_rec_from_cur(cur, &rec);
/*
 * Loop going up the tree, starting at the leaf level.
 * Stop when we don't get a split block, that must mean that
 * the insert is finished with this level.
 */
do {
/*
 * Insert nrec/nptr into this level of the tree.
 * Note if we fail, nptr will be null.
 */
error = xfs_btree_insrec(pcur, level, &nptr, &rec, &ncur, &i);
if (error) {
/* Never leak a cursor we created along the way. */
if (pcur != cur)
xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
goto error0;
}
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
level++;
/*
 * See if the cursor we just used is trash.
 * Can't trash the caller's cursor, but otherwise we should
 * if ncur is a new cursor or we're about to be done.
 */
if (pcur != cur &&
(ncur || xfs_btree_ptr_is_null(cur, &nptr))) {
/* Save the state from the cursor before we trash it */
if (cur->bc_ops->update_cursor)
cur->bc_ops->update_cursor(pcur, cur);
cur->bc_nlevels = pcur->bc_nlevels;
xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
}
/* If we got a new cursor, switch to it. */
if (ncur) {
pcur = ncur;
ncur = NULL;
}
} while (!xfs_btree_ptr_is_null(cur, &nptr));
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = i;
return 0;
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
}
/*
 * Try to merge a non-leaf block back into the inode root.
 *
 * Note: the killroot name comes from the fact that we're effectively
 * killing the old root block.  But because we can't just delete the
 * inode we have to copy the single block it was pointing to into the
 * inode.
 *
 * Gives up (returning 0 without changing the tree) when the root's
 * only child is a leaf, the root has more than one child, or the
 * child's entries won't fit in the incore root.
 */
STATIC int
xfs_btree_kill_iroot(
struct xfs_btree_cur *cur)
{
int whichfork = cur->bc_private.b.whichfork;
struct xfs_inode *ip = cur->bc_private.b.ip;
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_btree_block *block;
struct xfs_btree_block *cblock;
union xfs_btree_key *kp;
union xfs_btree_key *ckp;
union xfs_btree_ptr *pp;
union xfs_btree_ptr *cpp;
struct xfs_buf *cbp;
int level;
int index;
int numrecs;
#ifdef DEBUG
union xfs_btree_ptr ptr;
int i;
#endif
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
ASSERT(cur->bc_nlevels > 1);
/*
 * Don't deal with the root block needs to be a leaf case.
 * We're just going to turn the thing back into extents anyway.
 */
level = cur->bc_nlevels - 1;
if (level == 1)
goto out0;
/*
 * Give up if the root has multiple children.
 */
block = xfs_btree_get_iroot(cur);
if (xfs_btree_get_numrecs(block) != 1)
goto out0;
cblock = xfs_btree_get_block(cur, level - 1, &cbp);
numrecs = xfs_btree_get_numrecs(cblock);
/*
 * Only do this if the next level will fit.
 * Then the data must be copied up to the inode,
 * instead of freeing the root you free the next level.
 */
if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level))
goto out0;
XFS_BTREE_STATS_INC(cur, killroot);
#ifdef DEBUG
/* A root block must have no siblings. */
xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
#endif
/* Grow the incore root so the child's entries fit, if needed. */
index = numrecs - cur->bc_ops->get_maxrecs(cur, level);
if (index) {
xfs_iroot_realloc(cur->bc_private.b.ip, index,
cur->bc_private.b.whichfork);
/* realloc may have moved the broot; re-fetch it. */
block = ifp->if_broot;
}
be16_add_cpu(&block->bb_numrecs, index);
ASSERT(block->bb_numrecs == cblock->bb_numrecs);
/* Copy the child's keys and ptrs up into the inode root. */
kp = xfs_btree_key_addr(cur, 1, block);
ckp = xfs_btree_key_addr(cur, 1, cblock);
xfs_btree_copy_keys(cur, kp, ckp, numrecs);
pp = xfs_btree_ptr_addr(cur, 1, block);
cpp = xfs_btree_ptr_addr(cur, 1, cblock);
#ifdef DEBUG
for (i = 0; i < numrecs; i++) {
int error;
error = xfs_btree_check_ptr(cur, cpp, i, level - 1);
if (error) {
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
}
}
#endif
xfs_btree_copy_ptrs(cur, pp, cpp, numrecs);
/* Free the now-absorbed child block and drop a tree level. */
cur->bc_ops->free_block(cur, cbp);
XFS_BTREE_STATS_INC(cur, free);
cur->bc_bufs[level - 1] = NULL;
be16_add_cpu(&block->bb_level, -1);
xfs_trans_log_inode(cur->bc_tp, ip,
XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork));
cur->bc_nlevels--;
out0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
return 0;
}
/*
 * Kill the current root node, and replace it with its only child node.
 *
 * Used for externally-rooted btrees: points the root at *newroot,
 * drops the tree height by one, and frees the old root block.
 */
STATIC int
xfs_btree_kill_root(
struct xfs_btree_cur *cur,
struct xfs_buf *bp,
int level,
union xfs_btree_ptr *newroot)
{
int error;
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_STATS_INC(cur, killroot);
/*
 * Update the root pointer, decreasing the level by 1 and then
 * free the old root.
 */
cur->bc_ops->set_root(cur, newroot, -1);
error = cur->bc_ops->free_block(cur, bp);
if (error) {
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
}
XFS_BTREE_STATS_INC(cur, free);
/* The cursor no longer holds the freed root's buffer. */
cur->bc_bufs[level] = NULL;
cur->bc_ra[level] = 0;
cur->bc_nlevels--;
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
return 0;
}
STATIC int
xfs_btree_dec_cursor(
struct xfs_btree_cur *cur,
int level,
int *stat)
{
int error;
int i;
if (level > 0) {
error = xfs_btree_decrement(cur, level, &i);
if (error)
return error;
}
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 1;
return 0;
}
/*
* Single level of the btree record deletion routine.
* Delete record pointed to by cur/level.
* Remove the record from its block then rebalance the tree.
* Return 0 for error, 1 for done, 2 to go on to the next level.
*/
STATIC int /* error */
xfs_btree_delrec(
struct xfs_btree_cur *cur, /* btree cursor */
int level, /* level removing record from */
int *stat) /* fail/done/go-on */
{
struct xfs_btree_block *block; /* btree block */
union xfs_btree_ptr cptr; /* current block ptr */
struct xfs_buf *bp; /* buffer for block */
int error; /* error return value */
int i; /* loop counter */
union xfs_btree_key key; /* storage for keyp */
union xfs_btree_key *keyp = &key; /* passed to the next level */
union xfs_btree_ptr lptr; /* left sibling block ptr */
struct xfs_buf *lbp; /* left buffer pointer */
struct xfs_btree_block *left; /* left btree block */
int lrecs = 0; /* left record count */
int ptr; /* key/record index */
union xfs_btree_ptr rptr; /* right sibling block ptr */
struct xfs_buf *rbp; /* right buffer pointer */
struct xfs_btree_block *right; /* right btree block */
struct xfs_btree_block *rrblock; /* right-right btree block */
struct xfs_buf *rrbp; /* right-right buffer pointer */
int rrecs = 0; /* right record count */
struct xfs_btree_cur *tcur; /* temporary btree cursor */
int numrecs; /* temporary numrec count */
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGI(cur, level);
tcur = NULL;
/* Get the index of the entry being deleted, check for nothing there. */
ptr = cur->bc_ptrs[level];
if (ptr == 0) {
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 0;
return 0;
}
/* Get the buffer & block containing the record or key/ptr. */
block = xfs_btree_get_block(cur, level, &bp);
numrecs = xfs_btree_get_numrecs(block);
#ifdef DEBUG
error = xfs_btree_check_block(cur, block, level, bp);
if (error)
goto error0;
#endif
/* Fail if we're off the end of the block. */
if (ptr > numrecs) {
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 0;
return 0;
}
XFS_BTREE_STATS_INC(cur, delrec);
XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr);
/* Excise the entries being deleted. */
if (level > 0) {
/* It's a nonleaf. operate on keys and ptrs */
union xfs_btree_key *lkp;
union xfs_btree_ptr *lpp;
lkp = xfs_btree_key_addr(cur, ptr + 1, block);
lpp = xfs_btree_ptr_addr(cur, ptr + 1, block);
#ifdef DEBUG
for (i = 0; i < numrecs - ptr; i++) {
error = xfs_btree_check_ptr(cur, lpp, i, level);
if (error)
goto error0;
}
#endif
if (ptr < numrecs) {
xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr);
xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr);
xfs_btree_log_keys(cur, bp, ptr, numrecs - 1);
xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1);
}
/*
* If it's the first record in the block, we'll need to pass a
* key up to the next level (updkey).
*/
if (ptr == 1)
keyp = xfs_btree_key_addr(cur, 1, block);
} else {
/* It's a leaf. operate on records */
if (ptr < numrecs) {
xfs_btree_shift_recs(cur,
xfs_btree_rec_addr(cur, ptr + 1, block),
-1, numrecs - ptr);
xfs_btree_log_recs(cur, bp, ptr, numrecs - 1);
}
/*
* If it's the first record in the block, we'll need a key
* structure to pass up to the next level (updkey).
*/
if (ptr == 1) {
cur->bc_ops->init_key_from_rec(&key,
xfs_btree_rec_addr(cur, 1, block));
keyp = &key;
}
}
/*
* Decrement and log the number of entries in the block.
*/
xfs_btree_set_numrecs(block, --numrecs);
xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
/*
* If we are tracking the last record in the tree and
* we are at the far right edge of the tree, update it.
*/
if (xfs_btree_is_lastrec(cur, block, level)) {
cur->bc_ops->update_lastrec(cur, block, NULL,
ptr, LASTREC_DELREC);
}
/*
* We're at the root level. First, shrink the root block in-memory.
* Try to get rid of the next level down. If we can't then there's
* nothing left to do.
*/
if (level == cur->bc_nlevels - 1) {
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
xfs_iroot_realloc(cur->bc_private.b.ip, -1,
cur->bc_private.b.whichfork);
error = xfs_btree_kill_iroot(cur);
if (error)
goto error0;
error = xfs_btree_dec_cursor(cur, level, stat);
if (error)
goto error0;
*stat = 1;
return 0;
}
/*
* If this is the root level, and there's only one entry left,
* and it's NOT the leaf level, then we can get rid of this
* level.
*/
if (numrecs == 1 && level > 0) {
union xfs_btree_ptr *pp;
/*
* pp is still set to the first pointer in the block.
* Make it the new root of the btree.
*/
pp = xfs_btree_ptr_addr(cur, 1, block);
error = xfs_btree_kill_root(cur, bp, level, pp);
if (error)
goto error0;
} else if (level > 0) {
error = xfs_btree_dec_cursor(cur, level, stat);
if (error)
goto error0;
}
*stat = 1;
return 0;
}
/*
* If we deleted the leftmost entry in the block, update the
* key values above us in the tree.
*/
if (ptr == 1) {
error = xfs_btree_updkey(cur, keyp, level + 1);
if (error)
goto error0;
}
/*
* If the number of records remaining in the block is at least
* the minimum, we're done.
*/
if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) {
error = xfs_btree_dec_cursor(cur, level, stat);
if (error)
goto error0;
return 0;
}
/*
* Otherwise, we have to move some records around to keep the
* tree balanced. Look at the left and right sibling blocks to
* see if we can re-balance by moving only one record.
*/
xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB);
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
/*
* One child of root, need to get a chance to copy its contents
* into the root and delete it. Can't go up to next level,
* there's nothing to delete there.
*/
if (xfs_btree_ptr_is_null(cur, &rptr) &&
xfs_btree_ptr_is_null(cur, &lptr) &&
level == cur->bc_nlevels - 2) {
error = xfs_btree_kill_iroot(cur);
if (!error)
error = xfs_btree_dec_cursor(cur, level, stat);
if (error)
goto error0;
return 0;
}
}
ASSERT(!xfs_btree_ptr_is_null(cur, &rptr) ||
!xfs_btree_ptr_is_null(cur, &lptr));
/*
* Duplicate the cursor so our btree manipulations here won't
* disrupt the next level up.
*/
error = xfs_btree_dup_cursor(cur, &tcur);
if (error)
goto error0;
/*
* If there's a right sibling, see if it's ok to shift an entry
* out of it.
*/
if (!xfs_btree_ptr_is_null(cur, &rptr)) {
/*
* Move the temp cursor to the last entry in the next block.
* Actually any entry but the first would suffice.
*/
i = xfs_btree_lastrec(tcur, level);
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
error = xfs_btree_increment(tcur, level, &i);
if (error)
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
i = xfs_btree_lastrec(tcur, level);
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/* Grab a pointer to the block. */
right = xfs_btree_get_block(tcur, level, &rbp);
#ifdef DEBUG
error = xfs_btree_check_block(tcur, right, level, rbp);
if (error)
goto error0;
#endif
/* Grab the current block number, for future use. */
xfs_btree_get_sibling(tcur, right, &cptr, XFS_BB_LEFTSIB);
/*
* If right block is full enough so that removing one entry
* won't make it too empty, and left-shifting an entry out
* of right to us works, we're done.
*/
if (xfs_btree_get_numrecs(right) - 1 >=
cur->bc_ops->get_minrecs(tcur, level)) {
error = xfs_btree_lshift(tcur, level, &i);
if (error)
goto error0;
if (i) {
ASSERT(xfs_btree_get_numrecs(block) >=
cur->bc_ops->get_minrecs(tcur, level));
xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
tcur = NULL;
error = xfs_btree_dec_cursor(cur, level, stat);
if (error)
goto error0;
return 0;
}
}
/*
* Otherwise, grab the number of records in right for
* future reference, and fix up the temp cursor to point
* to our block again (last record).
*/
rrecs = xfs_btree_get_numrecs(right);
if (!xfs_btree_ptr_is_null(cur, &lptr)) {
i = xfs_btree_firstrec(tcur, level);
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
error = xfs_btree_decrement(tcur, level, &i);
if (error)
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
}
}
/*
* If there's a left sibling, see if it's ok to shift an entry
* out of it.
*/
if (!xfs_btree_ptr_is_null(cur, &lptr)) {
/*
* Move the temp cursor to the first entry in the
* previous block.
*/
i = xfs_btree_firstrec(tcur, level);
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
error = xfs_btree_decrement(tcur, level, &i);
if (error)
goto error0;
i = xfs_btree_firstrec(tcur, level);
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/* Grab a pointer to the block. */
left = xfs_btree_get_block(tcur, level, &lbp);
#ifdef DEBUG
error = xfs_btree_check_block(cur, left, level, lbp);
if (error)
goto error0;
#endif
/* Grab the current block number, for future use. */
xfs_btree_get_sibling(tcur, left, &cptr, XFS_BB_RIGHTSIB);
/*
* If left block is full enough so that removing one entry
* won't make it too empty, and right-shifting an entry out
* of left to us works, we're done.
*/
if (xfs_btree_get_numrecs(left) - 1 >=
cur->bc_ops->get_minrecs(tcur, level)) {
error = xfs_btree_rshift(tcur, level, &i);
if (error)
goto error0;
if (i) {
ASSERT(xfs_btree_get_numrecs(block) >=
cur->bc_ops->get_minrecs(tcur, level));
xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
tcur = NULL;
if (level == 0)
cur->bc_ptrs[0]++;
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 1;
return 0;
}
}
/*
* Otherwise, grab the number of records in right for
* future reference.
*/
lrecs = xfs_btree_get_numrecs(left);
}
/* Delete the temp cursor, we're done with it. */
xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
tcur = NULL;
/* If here, we need to do a join to keep the tree balanced. */
ASSERT(!xfs_btree_ptr_is_null(cur, &cptr));
if (!xfs_btree_ptr_is_null(cur, &lptr) &&
lrecs + xfs_btree_get_numrecs(block) <=
cur->bc_ops->get_maxrecs(cur, level)) {
/*
* Set "right" to be the starting block,
* "left" to be the left neighbor.
*/
rptr = cptr;
right = block;
rbp = bp;
error = xfs_btree_read_buf_block(cur, &lptr, level,
0, &left, &lbp);
if (error)
goto error0;
/*
* If that won't work, see if we can join with the right neighbor block.
*/
} else if (!xfs_btree_ptr_is_null(cur, &rptr) &&
rrecs + xfs_btree_get_numrecs(block) <=
cur->bc_ops->get_maxrecs(cur, level)) {
/*
* Set "left" to be the starting block,
* "right" to be the right neighbor.
*/
lptr = cptr;
left = block;
lbp = bp;
error = xfs_btree_read_buf_block(cur, &rptr, level,
0, &right, &rbp);
if (error)
goto error0;
/*
* Otherwise, we can't fix the imbalance.
* Just return. This is probably a logic error, but it's not fatal.
*/
} else {
error = xfs_btree_dec_cursor(cur, level, stat);
if (error)
goto error0;
return 0;
}
rrecs = xfs_btree_get_numrecs(right);
lrecs = xfs_btree_get_numrecs(left);
/*
* We're now going to join "left" and "right" by moving all the stuff
* in "right" to "left" and deleting "right".
*/
XFS_BTREE_STATS_ADD(cur, moves, rrecs);
if (level > 0) {
/* It's a non-leaf. Move keys and pointers. */
union xfs_btree_key *lkp; /* left btree key */
union xfs_btree_ptr *lpp; /* left address pointer */
union xfs_btree_key *rkp; /* right btree key */
union xfs_btree_ptr *rpp; /* right address pointer */
lkp = xfs_btree_key_addr(cur, lrecs + 1, left);
lpp = xfs_btree_ptr_addr(cur, lrecs + 1, left);
rkp = xfs_btree_key_addr(cur, 1, right);
rpp = xfs_btree_ptr_addr(cur, 1, right);
#ifdef DEBUG
for (i = 1; i < rrecs; i++) {
error = xfs_btree_check_ptr(cur, rpp, i, level);
if (error)
goto error0;
}
#endif
xfs_btree_copy_keys(cur, lkp, rkp, rrecs);
xfs_btree_copy_ptrs(cur, lpp, rpp, rrecs);
xfs_btree_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
xfs_btree_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
} else {
/* It's a leaf. Move records. */
union xfs_btree_rec *lrp; /* left record pointer */
union xfs_btree_rec *rrp; /* right record pointer */
lrp = xfs_btree_rec_addr(cur, lrecs + 1, left);
rrp = xfs_btree_rec_addr(cur, 1, right);
xfs_btree_copy_recs(cur, lrp, rrp, rrecs);
xfs_btree_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
}
XFS_BTREE_STATS_INC(cur, join);
/*
* Fix up the number of records and right block pointer in the
* surviving block, and log it.
*/
xfs_btree_set_numrecs(left, lrecs + rrecs);
xfs_btree_get_sibling(cur, right, &cptr, XFS_BB_RIGHTSIB),
xfs_btree_set_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
/* If there is a right sibling, point it to the remaining block. */
xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
if (!xfs_btree_ptr_is_null(cur, &cptr)) {
error = xfs_btree_read_buf_block(cur, &cptr, level,
0, &rrblock, &rrbp);
if (error)
goto error0;
xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB);
xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
}
/* Free the deleted block. */
error = cur->bc_ops->free_block(cur, rbp);
if (error)
goto error0;
XFS_BTREE_STATS_INC(cur, free);
/*
* If we joined with the left neighbor, set the buffer in the
* cursor to the left block, and fix up the index.
*/
if (bp != lbp) {
cur->bc_bufs[level] = lbp;
cur->bc_ptrs[level] += lrecs;
cur->bc_ra[level] = 0;
}
/*
* If we joined with the right neighbor and there's a level above
* us, increment the cursor at that level.
*/
else if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) ||
(level + 1 < cur->bc_nlevels)) {
error = xfs_btree_increment(cur, level + 1, &i);
if (error)
goto error0;
}
/*
* Readjust the ptr at this level if it's not a leaf, since it's
* still pointing at the deletion point, which makes the cursor
* inconsistent. If this makes the ptr 0, the caller fixes it up.
* We can't use decrement because it would change the next level up.
*/
if (level > 0)
cur->bc_ptrs[level]--;
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
/* Return value means the next level up has something to do. */
*stat = 2;
return 0;
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
if (tcur)
xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
return error;
}
/*
* Delete the record pointed to by cur.
* The cursor refers to the place where the record was (could be inserted)
* when the operation returns.
*/
int					/* error */
xfs_btree_delete(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	int			error;	/* error return value */
	int			level;	/* btree level being processed */
	int			i;	/* status from lower-level calls */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	/*
	 * Walk up from the leaf.  xfs_btree_delrec() returning 2 means a
	 * join was performed at this level and the level above must be
	 * processed too; any other value ends the walk.
	 */
	level = 0;
	do {
		error = xfs_btree_delrec(cur, level, &i);
		if (error)
			goto error0;
		level++;
	} while (i == 2);

	if (i == 0) {
		/*
		 * The record went away entirely; if a ptr above the leaf
		 * was left at zero, step the cursor back at that level so
		 * it stays self-consistent.
		 */
		level = 1;
		while (level < cur->bc_nlevels) {
			if (cur->bc_ptrs[level] == 0) {
				error = xfs_btree_decrement(cur, level, &i);
				if (error)
					goto error0;
				break;
			}
			level++;
		}
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = i;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
/*
* Get the data from the pointed-to record.
*/
int					/* error */
xfs_btree_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	union xfs_btree_rec	**recp,	/* output: btree record */
	int			*stat)	/* output: success/failure */
{
	struct xfs_btree_block	*block;	/* btree block */
	struct xfs_buf		*bp;	/* buffer pointer */
	int			recno;	/* 1-based record number in leaf */
#ifdef DEBUG
	int			error;	/* error return value */
#endif

	recno = cur->bc_ptrs[0];
	block = xfs_btree_get_block(cur, 0, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, 0, bp);
	if (error)
		return error;
#endif

	/* Cursor is off either end of the leaf: report failure. */
	if (recno <= 0 || recno > xfs_btree_get_numrecs(block)) {
		*stat = 0;
		return 0;
	}

	/* Hand back a pointer to the record's in-block data. */
	*recp = xfs_btree_rec_addr(cur, recno, block);
	*stat = 1;
	return 0;
}
| gpl-2.0 |
TheMeier/lge-kernel-sniper-linux-stable | drivers/i2c/busses/i2c-tiny-usb.c | 4186 | 7776 | /*
* driver for the i2c-tiny-usb adapter - 1.0
* http://www.harbaum.org/till/i2c_tiny_usb
*
* Copyright (C) 2006-2007 Till Harbaum (Till@Harbaum.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
/* include interfaces to usb layer */
#include <linux/usb.h>
/* include interface to i2c layer */
#include <linux/i2c.h>
/* commands via USB, must match command ids in the firmware */
#define CMD_ECHO	0	/* loopback test */
#define CMD_GET_FUNC	1	/* query supported i2c functionality */
#define CMD_SET_DELAY	2	/* set the i2c bit delay */
#define CMD_GET_STATUS	3	/* result of the last transfer */
#define CMD_I2C_IO	4	/* perform an i2c transfer */
#define CMD_I2C_IO_BEGIN	(1<<0)	/* flag: first message of a transaction */
#define CMD_I2C_IO_END	(1<<1)	/* flag: last message of a transaction */

/* i2c bit delay, default is 10us -> 100kHz max
   (in practice, due to additional delays in the i2c bitbanging
   code this results in a i2c clock of about 50kHz) */
static unsigned short delay = 10;
module_param(delay, ushort, 0);
MODULE_PARM_DESC(delay, "bit delay in microseconds "
		 "(default is 10us for 100kHz max)");

/* forward declarations for the USB control-transfer helpers below */
static int usb_read(struct i2c_adapter *adapter, int cmd,
		    int value, int index, void *data, int len);
static int usb_write(struct i2c_adapter *adapter, int cmd,
		     int value, int index, void *data, int len);

/* ----- begin of i2c layer ---------------------------------------------- */

/* status codes reported by the firmware via CMD_GET_STATUS */
#define STATUS_IDLE		0
#define STATUS_ADDRESS_ACK	1
#define STATUS_ADDRESS_NAK	2
/*
 * i2c master_xfer implementation: each message in the transaction is
 * shipped to the firmware as one vendor control transfer, then the
 * firmware's status register is polled to see whether the slave ACKed.
 * Returns the number of messages transferred, or -EREMOTEIO on failure.
 */
static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
{
	unsigned char status;
	int i;

	dev_dbg(&adapter->dev, "master xfer %d messages:\n", num);

	for (i = 0 ; i < num ; i++) {
		struct i2c_msg *msg = &msgs[i];
		int cmd = CMD_I2C_IO;

		/* mark the first and last messages of the transaction */
		if (i == 0)
			cmd |= CMD_I2C_IO_BEGIN;
		if (i == num - 1)
			cmd |= CMD_I2C_IO_END;

		dev_dbg(&adapter->dev,
			" %d: %s (flags %d) %d bytes to 0x%02x\n",
			i, msg->flags & I2C_M_RD ? "read" : "write",
			msg->flags, msg->len, msg->addr);

		/* and directly send the message */
		if (msg->flags & I2C_M_RD) {
			/* read data */
			if (usb_read(adapter, cmd, msg->flags, msg->addr,
				     msg->buf, msg->len) != msg->len) {
				dev_err(&adapter->dev,
					"failure reading data\n");
				return -EREMOTEIO;
			}
		} else {
			/* write data */
			if (usb_write(adapter, cmd, msg->flags, msg->addr,
				      msg->buf, msg->len) != msg->len) {
				dev_err(&adapter->dev,
					"failure writing data\n");
				return -EREMOTEIO;
			}
		}

		/* ask the firmware whether the slave ACKed its address */
		if (usb_read(adapter, CMD_GET_STATUS, 0, 0, &status, 1) != 1) {
			dev_err(&adapter->dev, "failure reading status\n");
			return -EREMOTEIO;
		}

		dev_dbg(&adapter->dev, " status = %d\n", status);
		if (status == STATUS_ADDRESS_NAK)
			return -EREMOTEIO;
	}

	return i;
}
/*
 * i2c functionality callback: ask the firmware (CMD_GET_FUNC) for its
 * capability bitmask.  Returns 0 (no functionality) if the query fails.
 */
static u32 usb_func(struct i2c_adapter *adapter)
{
	__le32 func;
	int len;

	/* get functionality from adapter */
	len = usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func));
	if (len != sizeof(func)) {
		dev_err(&adapter->dev, "failure reading functionality\n");
		return 0;
	}

	return le32_to_cpu(func);
}
/* This is the actual algorithm we define */
static const struct i2c_algorithm usb_algorithm = {
	.master_xfer	= usb_xfer,
	.functionality	= usb_func,
};

/* ----- end of i2c layer ------------------------------------------------ */

/* ----- begin of usb layer ---------------------------------------------- */

/*
 * Initially the usb i2c interface uses a vid/pid pair donated by
 * Future Technology Devices International Ltd., later a pair was
 * bought from EZPrototypes
 */
static const struct usb_device_id i2c_tiny_usb_table[] = {
	{ USB_DEVICE(0x0403, 0xc631) },	/* FTDI */
	{ USB_DEVICE(0x1c40, 0x0534) },	/* EZPrototypes */
	{ }				/* Terminating entry */
};

MODULE_DEVICE_TABLE(usb, i2c_tiny_usb_table);

/* Structure to hold all of our device specific stuff */
struct i2c_tiny_usb {
	struct usb_device *usb_dev;	/* the usb device for this device */
	struct usb_interface *interface;	/* the interface for this device */
	struct i2c_adapter adapter;	/* i2c related things */
};
static int usb_read(struct i2c_adapter *adapter, int cmd,
int value, int index, void *data, int len)
{
struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
/* do control transfer */
return usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE |
USB_DIR_IN, value, index, data, len, 2000);
}
static int usb_write(struct i2c_adapter *adapter, int cmd,
int value, int index, void *data, int len)
{
struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
/* do control transfer */
return usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
value, index, data, len, 2000);
}
/* Drop our reference on the usb device and free the driver state. */
static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev)
{
	usb_put_dev(dev->usb_dev);
	kfree(dev);
}
/*
 * USB probe: allocate per-device state, program the firmware's bit
 * delay, then register an i2c adapter for the dongle.  All failure
 * paths release the usb reference and the state via i2c_tiny_usb_free().
 */
static int i2c_tiny_usb_probe(struct usb_interface *interface,
			      const struct usb_device_id *id)
{
	struct i2c_tiny_usb *dev;
	int retval = -ENOMEM;
	u16 version;

	dev_dbg(&interface->dev, "probing usb device\n");

	/* allocate memory for our device state and initialize it */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (dev == NULL) {
		dev_err(&interface->dev, "Out of memory\n");
		goto error;
	}

	dev->usb_dev = usb_get_dev(interface_to_usbdev(interface));
	dev->interface = interface;

	/* save our data pointer in this interface device */
	usb_set_intfdata(interface, dev);

	version = le16_to_cpu(dev->usb_dev->descriptor.bcdDevice);
	dev_info(&interface->dev,
		 "version %x.%02x found at bus %03d address %03d\n",
		 version >> 8, version & 0xff,
		 dev->usb_dev->bus->busnum, dev->usb_dev->devnum);

	/* setup i2c adapter description */
	dev->adapter.owner = THIS_MODULE;
	dev->adapter.class = I2C_CLASS_HWMON;
	dev->adapter.algo = &usb_algorithm;
	dev->adapter.algo_data = dev;
	snprintf(dev->adapter.name, sizeof(dev->adapter.name),
		 "i2c-tiny-usb at bus %03d device %03d",
		 dev->usb_dev->bus->busnum, dev->usb_dev->devnum);

	/* a zero-length CMD_SET_DELAY write returns 0 on success */
	if (usb_write(&dev->adapter, CMD_SET_DELAY, delay, 0, NULL, 0) != 0) {
		dev_err(&dev->adapter.dev,
			"failure setting delay to %dus\n", delay);
		retval = -EIO;
		goto error;
	}

	dev->adapter.dev.parent = &dev->interface->dev;

	/* and finally attach to i2c layer -- this can fail, so check it */
	retval = i2c_add_adapter(&dev->adapter);
	if (retval)
		goto error;

	/* inform user about successful attachment to i2c layer */
	dev_info(&dev->adapter.dev, "connected i2c-tiny-usb device\n");

	return 0;

error:
	if (dev)
		i2c_tiny_usb_free(dev);

	return retval;
}
/*
 * USB disconnect: unregister the i2c adapter first so no new transfers
 * can start, then drop the interface data and free the device state.
 */
static void i2c_tiny_usb_disconnect(struct usb_interface *interface)
{
	struct i2c_tiny_usb *dev = usb_get_intfdata(interface);

	i2c_del_adapter(&dev->adapter);
	usb_set_intfdata(interface, NULL);
	i2c_tiny_usb_free(dev);

	dev_dbg(&interface->dev, "disconnected\n");
}

/* USB driver glue: probe/disconnect against the id table above. */
static struct usb_driver i2c_tiny_usb_driver = {
	.name		= "i2c-tiny-usb",
	.probe		= i2c_tiny_usb_probe,
	.disconnect	= i2c_tiny_usb_disconnect,
	.id_table	= i2c_tiny_usb_table,
};
/* Module init: register this driver with the USB subsystem. */
static int __init usb_i2c_tiny_usb_init(void)
{
	/* register this driver with the USB subsystem */
	return usb_register(&i2c_tiny_usb_driver);
}

/* Module exit: deregister from the USB subsystem. */
static void __exit usb_i2c_tiny_usb_exit(void)
{
	/* deregister this driver with the USB subsystem */
	usb_deregister(&i2c_tiny_usb_driver);
}

module_init(usb_i2c_tiny_usb_init);
module_exit(usb_i2c_tiny_usb_exit);
/* ----- end of usb layer ------------------------------------------------ */
MODULE_AUTHOR("Till Harbaum <Till@Harbaum.org>");
MODULE_DESCRIPTION("i2c-tiny-usb driver v1.0");
MODULE_LICENSE("GPL");
| gpl-2.0 |
prisciou/android_kernel_wiko_s9321 | arch/mn10300/kernel/irq.c | 4698 | 9930 | /* MN10300 Arch-specific interrupt handling
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <asm/setup.h>
#include <asm/serial-regs.h>
/*
 * Per-CPU EPSW image used when interrupts are enabled: IE plus
 * interrupt-mask level 7 (all priorities admitted).
 */
unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
	[0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
};
EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);

#ifdef CONFIG_SMP
/* CPU currently servicing each IRQ (updated when an affinity change lands) */
static char irq_affinity_online[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = 0
};

#define NR_IRQ_WORDS	((NR_IRQS + 31) / 32)
/* bitmap of IRQs with a pending affinity-change request */
static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
	[0 ... NR_IRQ_WORDS - 1] = 0
};
#endif  /* CONFIG_SMP */

/* count of spurious/unhandled interrupt events (shown as ERR in /proc) */
atomic_t irq_err_count;
/*
 * MN10300 interrupt controller operations
 */
static void mn10300_cpupic_ack(struct irq_data *d)
{
	/* Clear the request (detect) latch for this channel. */
	unsigned int irq = d->irq;
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	GxICR_u8(irq) = GxICR_DETECT;
	tmp = GxICR(irq);	/* dummy read flushes the write to the ICR */
	arch_local_irq_restore(flags);
}

/*
 * Atomically read-modify-write an interrupt control register:
 * new = (old & mask) | set.  Runs with interrupts disabled; the
 * trailing read flushes the write before interrupts come back.
 */
static void __mask_and_set_icr(unsigned int irq,
			       unsigned int mask, unsigned int set)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	tmp = GxICR(irq);
	GxICR(irq) = (tmp & mask) | set;
	tmp = GxICR(irq);	/* flush */
	arch_local_irq_restore(flags);
}
/* Disable an IRQ channel: clear its enable bit, keep the trigger level. */
static void mn10300_cpupic_mask(struct irq_data *d)
{
	__mask_and_set_icr(d->irq, GxICR_LEVEL, 0);
}

/*
 * Disable the channel and clear its request latch.  On SMP, a pending
 * affinity request is honoured here: the channel is masked locally and
 * re-armed on the newly chosen CPU via its cross-ICR instead.
 */
static void mn10300_cpupic_mask_ack(struct irq_data *d)
{
	unsigned int irq = d->irq;
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
		tmp = GxICR(irq);	/* flush */
	} else {
		u16 tmp2;

		/* mask locally, then migrate the channel to the new CPU */
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL);
		tmp2 = GxICR(irq);	/* flush */

		irq_affinity_online[irq] =
			cpumask_any_and(d->affinity, cpu_online_mask);
		CROSS_GxICR(irq, irq_affinity_online[irq]) =
			(tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);	/* flush */
	}

	arch_local_irq_restore(flags);
#else  /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
#endif /* CONFIG_SMP */
}

/* Re-enable an IRQ channel without touching the request latch. */
static void mn10300_cpupic_unmask(struct irq_data *d)
{
	__mask_and_set_icr(d->irq, GxICR_LEVEL, GxICR_ENABLE);
}
static void mn10300_cpupic_unmask_clear(struct irq_data *d)
{
	unsigned int irq = d->irq;

	/* the MN10300 PIC latches its interrupt request bit, even after the
	 * device has ceased to assert its interrupt line and the interrupt
	 * channel has been disabled in the PIC, so for level-triggered
	 * interrupts we need to clear the request bit when we re-enable */
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		/* no affinity change pending: re-enable in place */
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = GxICR(irq);	/* flush */
	} else {
		/* affinity change pending: re-enable on the chosen CPU */
		tmp = GxICR(irq);

		irq_affinity_online[irq] = cpumask_any_and(d->affinity,
							   cpu_online_mask);
		CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);	/* flush */
	}

	arch_local_irq_restore(flags);
#else  /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
#endif /* CONFIG_SMP */
}
#ifdef CONFIG_SMP
/*
 * Request that an IRQ be serviced by another CPU.
 *
 * IRQs wired to fixed per-CPU facilities (timers, IPIs, the on-chip
 * serial ports) cannot be moved and yield -1; all others have their
 * bit set in irq_affinity_request and are actually migrated later, in
 * mask_ack/unmask_clear, once the channel is quiescent.
 *
 * Note: the #elif tests must use defined(); a bare "#elif CONFIG_x"
 * silently evaluates to 0 when the option is not set and trips -Wundef.
 */
static int
mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
			   bool force)
{
	unsigned long flags;
	int err;

	flags = arch_local_cli_save();

	/* check irq no */
	switch (d->irq) {
	case TMJCIRQ:
	case RESCHEDULE_IPI:
	case CALL_FUNC_SINGLE_IPI:
	case LOCAL_TIMER_IPI:
	case FLUSH_CACHE_IPI:
	case CALL_FUNCTION_NMI_IPI:
	case DEBUGGER_NMI_IPI:
#ifdef CONFIG_MN10300_TTYSM0
	case SC0RXIRQ:
	case SC0TXIRQ:
#ifdef CONFIG_MN10300_TTYSM0_TIMER8
	case TM8IRQ:
#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
	case TM2IRQ:
#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */
#endif /* CONFIG_MN10300_TTYSM0 */

#ifdef CONFIG_MN10300_TTYSM1
	case SC1RXIRQ:
	case SC1TXIRQ:
#ifdef CONFIG_MN10300_TTYSM1_TIMER12
	case TM12IRQ:
#elif defined(CONFIG_MN10300_TTYSM1_TIMER9)
	case TM9IRQ:
#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
	case TM3IRQ:
#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
#endif /* CONFIG_MN10300_TTYSM1 */

#ifdef CONFIG_MN10300_TTYSM2
	case SC2RXIRQ:
	case SC2TXIRQ:
	case TM10IRQ:
#endif /* CONFIG_MN10300_TTYSM2 */
		err = -1;
		break;

	default:
		set_bit(d->irq, irq_affinity_request);
		err = 0;
		break;
	}

	arch_local_irq_restore(flags);
	return err;
}
#endif /* CONFIG_SMP */
/*
 * MN10300 PIC level-triggered IRQ handling.
 *
 * The PIC has no 'ACK' function per se. It is possible to clear individual
 * channel latches, but each latch relatches whether or not the channel is
 * masked, so we need to clear the latch when we unmask the channel.
 *
 * Also for this reason, we don't supply an ack() op (it's unused anyway if
 * mask_ack() is provided), and mask_ack() just masks.
 */
static struct irq_chip mn10300_cpu_pic_level = {
	.name			= "cpu_l",
	.irq_disable		= mn10300_cpupic_mask,
	.irq_enable		= mn10300_cpupic_unmask_clear,
	.irq_ack		= NULL,
	.irq_mask		= mn10300_cpupic_mask,
	.irq_mask_ack		= mn10300_cpupic_mask,
	.irq_unmask		= mn10300_cpupic_unmask_clear,
#ifdef CONFIG_SMP
	.irq_set_affinity	= mn10300_cpupic_setaffinity,
#endif
};

/*
 * MN10300 PIC edge-triggered IRQ handling.
 *
 * We use the latch clearing function of the PIC as the 'ACK' function.
 */
static struct irq_chip mn10300_cpu_pic_edge = {
	.name			= "cpu_e",
	.irq_disable		= mn10300_cpupic_mask,
	.irq_enable		= mn10300_cpupic_unmask,
	.irq_ack		= mn10300_cpupic_ack,
	.irq_mask		= mn10300_cpupic_mask,
	.irq_mask_ack		= mn10300_cpupic_mask_ack,
	.irq_unmask		= mn10300_cpupic_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity	= mn10300_cpupic_setaffinity,
#endif
};
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(int irq)
{
	printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
}
/*
 * change the level at which an IRQ executes
 * - must not be called whilst interrupts are being processed!
 */
void set_intr_level(int irq, u16 level)
{
	BUG_ON(in_interrupt());
	__mask_and_set_icr(irq, GxICR_ENABLE, level);
}

/*
 * mark an interrupt to be ACK'd after interrupt handlers have been run rather
 * than before
 */
void mn10300_set_lateack_irq_type(int irq)
{
	/* switch the channel to the level-triggered chip, whose latch is
	 * cleared on unmask instead of in an ack op */
	irq_set_chip_and_handler(irq, &mn10300_cpu_pic_level,
				 handle_level_irq);
}
/*
 * initialise the interrupt system
 */
void __init init_IRQ(void)
{
	int irq;

	/* default every still-unclaimed IRQ to the edge-triggered chip */
	for (irq = 0; irq < NR_IRQS; irq++)
		if (irq_get_chip(irq) == &no_irq_chip)
			/* due to the PIC latching interrupt requests, even
			 * when the IRQ is disabled, IRQ_PENDING is superfluous
			 * and we can use handle_level_irq() for edge-triggered
			 * interrupts */
			irq_set_chip_and_handler(irq, &mn10300_cpu_pic_edge,
						 handle_level_irq);

	/* let the board/unit code claim its own interrupts */
	unit_init_IRQ();
}
/*
 * handle normal device IRQs
 */
asmlinkage void do_IRQ(void)
{
	unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
	unsigned int cpu_id = smp_processor_id();
	int irq;

	/* catch kernel stack overflow early */
	sp = current_stack_pointer();
	BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);

	/* make sure local_irq_enable() doesn't muck up the interrupt priority
	 * setting in EPSW */
	old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
	local_save_flags(epsw);
	__mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
	irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;

#ifdef CONFIG_MN10300_WD_TIMER
	__IRQ_STAT(cpu_id, __irq_count)++;
#endif

	irq_enter();

	/* drain all pending interrupts the controller will report at the
	 * current priority mask */
	for (;;) {
		/* ask the interrupt controller for the next IRQ to process
		 * - the result we get depends on EPSW.IM
		 */
		irq = IAGR & IAGR_GN;
		if (!irq)
			break;

		local_irq_restore(irq_disabled_epsw);

		/* IAGR value appears to be the vector scaled by 4, hence
		 * the >> 2 -- TODO confirm against the MN10300 manual */
		generic_handle_irq(irq >> 2);

		/* restore IRQ controls for IAGR access */
		local_irq_restore(epsw);
	}

	__mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;

	irq_exit();
}
/*
 * Display interrupt management information through /proc/interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_MN10300_WD_TIMER
	int j;

	/* per-CPU NMI counts (watchdog timer) */
	seq_printf(p, "%*s: ", prec, "NMI");
	for (j = 0; j < NR_CPUS; j++)
		if (cpu_online(j))
			seq_printf(p, "%10u ", nmi_count(j));
	seq_putc(p, '\n');
#endif

	/* total count of bad/unexpected IRQ events */
	seq_printf(p, "%*s: ", prec, "ERR");
	seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Move IRQs currently targeted at this (outgoing) CPU over to another
 * online CPU.  Called while a CPU is being taken offline.
 */
void migrate_irqs(void)
{
	int irq;
	unsigned int self, new;
	unsigned long flags;

	self = smp_processor_id();
	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_data *data = irq_get_irq_data(irq);

		if (irqd_is_per_cpu(data))
			continue;

		/* NOTE(review): this test mixes the irq_affinity[] array
		 * with data->affinity -- confirm the two views of affinity
		 * are kept coherent */
		if (cpumask_test_cpu(self, &data->affinity) &&
		    !cpumask_intersects(&irq_affinity[irq], cpu_online_mask)) {
			int cpu_id;
			cpu_id = cpumask_first(cpu_online_mask);
			cpumask_set_cpu(cpu_id, &data->affinity);
		}
		/* We need to operate irq_affinity_online atomically. */
		arch_local_cli_save(flags);
		if (irq_affinity_online[irq] == self) {
			u16 x, tmp;

			/* mask the channel on this CPU... */
			x = GxICR(irq);
			GxICR(irq) = x & GxICR_LEVEL;
			tmp = GxICR(irq);	/* flush */

			/* ...re-arm it on the chosen online CPU... */
			new = cpumask_any_and(&data->affinity,
					      cpu_online_mask);
			irq_affinity_online[irq] = new;

			CROSS_GxICR(irq, new) =
				(x & GxICR_LEVEL) | GxICR_DETECT;
			tmp = CROSS_GxICR(irq, new);	/* flush */

			/* ...and replay any request already latched here */
			x &= GxICR_LEVEL | GxICR_ENABLE;
			if (GxICR(irq) & GxICR_REQUEST)
				x |= GxICR_REQUEST | GxICR_DETECT;
			CROSS_GxICR(irq, new) = x;
			tmp = CROSS_GxICR(irq, new);	/* flush */
		}
		arch_local_irq_restore(flags);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */
| gpl-2.0 |
Split-Screen/android_kernel_samsung_trlte | arch/powerpc/platforms/embedded6xx/c2k.c | 8794 | 3681 | /*
* Board setup routines for the GEFanuc C2K board
*
* Author: Remi Machet <rmachet@slac.stanford.edu>
*
* Originated from prpmc2800.c
*
* 2008 (c) Stanford University
* 2007 (c) MontaVista, Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/time.h>
#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/time.h>
#include <mm/mmu_decl.h>
#include <sysdev/mv64x60.h>
/* MV64x60 MPP (multi-purpose pin) control register offsets */
#define MV64x60_MPP_CNTL_0	0x0000
#define MV64x60_MPP_CNTL_2	0x0008

/* MV64x60 GPP (general-purpose port) register offsets */
#define MV64x60_GPP_IO_CNTL	0x0000
#define MV64x60_GPP_LEVEL_CNTL	0x0010
#define MV64x60_GPP_VALUE_SET	0x0018

/* ioremapped bases of the MPP/GPP register blocks, set in c2k_setup_arch() */
static void __iomem *mv64x60_mpp_reg_base;
static void __iomem *mv64x60_gpp_reg_base;
static void __init c2k_setup_arch(void)
{
	struct device_node *np;
	phys_addr_t paddr;
	const unsigned int *reg;

	/*
	 * ioremap mpp and gpp registers in case they are later
	 * needed by c2k_reset_board().
	 *
	 * NOTE(review): assumes the marvell mpp/gpp nodes and their "reg"
	 * properties always exist in this board's device tree; a missing
	 * node would make the property lookups below oops -- confirm.
	 */
	np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-mpp");
	reg = of_get_property(np, "reg", NULL);
	paddr = of_translate_address(np, reg);
	of_node_put(np);
	mv64x60_mpp_reg_base = ioremap(paddr, reg[1]);	/* reg[1] = size */

	np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-gpp");
	reg = of_get_property(np, "reg", NULL);
	paddr = of_translate_address(np, reg);
	of_node_put(np);
	mv64x60_gpp_reg_base = ioremap(paddr, reg[1]);

#ifdef CONFIG_PCI
	mv64x60_pci_init();
#endif
}
/*
 * Trigger a board reset through two MV64x60 GPIO pins: route the pins
 * to GPIO in the MPP control registers, configure them as active-high
 * outputs, then assert both in a single VALUE_SET write.  The masks
 * and bit values are board wiring specifics.
 */
static void c2k_reset_board(void)
{
	u32 temp;

	local_irq_disable();

	/* switch the first reset pin to GPIO function */
	temp = in_le32(mv64x60_mpp_reg_base + MV64x60_MPP_CNTL_0);
	temp &= 0xFFFF0FFF;
	out_le32(mv64x60_mpp_reg_base + MV64x60_MPP_CNTL_0, temp);

	/* active-high level, output direction */
	temp = in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_LEVEL_CNTL);
	temp |= 0x00000004;
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_LEVEL_CNTL, temp);

	temp = in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_IO_CNTL);
	temp |= 0x00000004;
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_IO_CNTL, temp);

	/* same sequence for the second reset pin */
	temp = in_le32(mv64x60_mpp_reg_base + MV64x60_MPP_CNTL_2);
	temp &= 0xFFFF0FFF;
	out_le32(mv64x60_mpp_reg_base + MV64x60_MPP_CNTL_2, temp);

	temp = in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_LEVEL_CNTL);
	temp |= 0x00080000;
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_LEVEL_CNTL, temp);

	temp = in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_IO_CNTL);
	temp |= 0x00080000;
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_IO_CNTL, temp);

	/* assert both reset pins together */
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_VALUE_SET, 0x00080004);
}
/*
 * Restart hook: pulse the board reset, then give the hardware 100ms to
 * act.  If we are still running after that, the reset failed.
 */
static void c2k_restart(char *cmd)
{
	c2k_reset_board();
	msleep(100);
	panic("restart failed\n");
}
#ifdef CONFIG_NOT_COHERENT_CACHE
#define COHERENCY_SETTING "off"
#else
#define COHERENCY_SETTING "on"
#endif

/*
 * /proc/cpuinfo hook: report the board vendor and whether the cache
 * runs in coherent mode.  Constant strings use seq_puts (no format
 * parsing needed); the %s line keeps seq_printf.
 */
void c2k_show_cpuinfo(struct seq_file *m)
{
	seq_puts(m, "Vendor\t\t: GEFanuc\n");
	seq_printf(m, "coherency\t: %s\n", COHERENCY_SETTING);
}
/*
 * Called very early, device-tree isn't unflattened
 */
static int __init c2k_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	if (!of_flat_dt_is_compatible(root, "GEFanuc,C2K"))
		return 0;

	printk(KERN_INFO "Detected a GEFanuc C2K board\n");

	/* reset the L2 control register, then enable the L2 cache with
	 * parity checking and invalidate its contents */
	_set_L2CR(0);
	_set_L2CR(L2CR_L2E | L2CR_L2PE | L2CR_L2I);

	return 1;
}
/* machine description registered with the powerpc ppc_md machinery */
define_machine(c2k) {
	.name			= "C2K",
	.probe			= c2k_probe,
	.setup_arch		= c2k_setup_arch,
	.init_early		= mv64x60_init_early,
	.show_cpuinfo		= c2k_show_cpuinfo,
	.init_IRQ		= mv64x60_init_irq,
	.get_irq		= mv64x60_get_irq,
	.restart		= c2k_restart,
	.calibrate_decr		= generic_calibrate_decr,
};
| gpl-2.0 |
croniccorey/linux-amlogic | arch/powerpc/platforms/embedded6xx/holly.c | 8794 | 7280 | /*
* Board setup routines for the IBM 750GX/CL platform w/ TSI10x bridge
*
* Copyright 2007 IBM Corporation
*
* Stephen Winiecki <stevewin@us.ibm.com>
* Josh Boyer <jwboyer@linux.vnet.ibm.com>
*
* Based on code from mpc7448_hpc2.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_core.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/tsi108.h>
#include <asm/pci-bridge.h>
#include <asm/reg.h>
#include <mm/mmu_decl.h>
#include <asm/tsi108_irq.h>
#include <asm/tsi108_pci.h>
#include <asm/mpic.h>
#undef DEBUG
#define HOLLY_PCI_CFG_PHYS 0x7c000000
/*
 * PCI exclusion hook: hide the host bridge itself (bus 0, slot 0)
 * from configuration-space scans.
 */
int holly_exclude_device(struct pci_controller *hose, u_char bus, u_char devfn)
{
	return (bus == 0 && PCI_SLOT(devfn) == 0) ?
		PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}
static void holly_remap_bridge(void)
{
	u32 lut_val, lut_addr;
	int i;

	printk(KERN_INFO "Remapping PCI bridge\n");

	/* Re-init the PCI bridge and LUT registers to have mappings that don't
	 * rely on PIBS
	 */
	lut_addr = 0x900;
	/* 31 identity-ish processor-bus LUT entries, one pair of registers
	 * (value + upper) per entry */
	for (i = 0; i < 31; i++) {
		tsi108_write_reg(TSI108_PB_OFFSET + lut_addr, 0x00000201);
		lut_addr += 4;
		tsi108_write_reg(TSI108_PB_OFFSET + lut_addr, 0x0);
		lut_addr += 4;
	}

	/* Reserve the last LUT entry for PCI I/O space */
	tsi108_write_reg(TSI108_PB_OFFSET + lut_addr, 0x00000241);
	lut_addr += 4;
	tsi108_write_reg(TSI108_PB_OFFSET + lut_addr, 0x0);

	/* Map PCI I/O space */
	tsi108_write_reg(TSI108_PCI_PFAB_IO_UPPER, 0x0);
	tsi108_write_reg(TSI108_PCI_PFAB_IO, 0x1);

	/* Map PCI CFG space (at HOLLY_PCI_CFG_PHYS, enable bit set) */
	tsi108_write_reg(TSI108_PCI_PFAB_BAR0_UPPER, 0x0);
	tsi108_write_reg(TSI108_PCI_PFAB_BAR0, 0x7c000000 | 0x01);

	/* We don't need MEM32 and PRM remapping so disable them */
	tsi108_write_reg(TSI108_PCI_PFAB_MEM32, 0x0);
	tsi108_write_reg(TSI108_PCI_PFAB_PFM3, 0x0);
	tsi108_write_reg(TSI108_PCI_PFAB_PFM4, 0x0);

	/* Set P2O_BAR0 */
	tsi108_write_reg(TSI108_PCI_P2O_BAR0_UPPER, 0x0);
	tsi108_write_reg(TSI108_PCI_P2O_BAR0, 0xc0000000);

	/* Init the PCI LUTs to do no remapping */
	lut_addr = 0x500;
	lut_val = 0x00000002;

	for (i = 0; i < 32; i++) {
		tsi108_write_reg(TSI108_PCI_OFFSET + lut_addr, lut_val);
		lut_addr += 4;
		tsi108_write_reg(TSI108_PCI_OFFSET + lut_addr, 0x40000000);
		lut_addr += 4;
		lut_val += 0x02000000;	/* next 32MB window */
	}
	tsi108_write_reg(TSI108_PCI_P2O_PAGE_SIZES, 0x00007900);

	/* Set 64-bit PCI bus address for system memory */
	tsi108_write_reg(TSI108_PCI_P2O_BAR2_UPPER, 0x0);
	tsi108_write_reg(TSI108_PCI_P2O_BAR2, 0x0);
}
static void __init holly_setup_arch(void)
{
	struct device_node *np;

	if (ppc_md.progress)
		ppc_md.progress("holly_setup_arch():set_bridge", 0);

	tsi108_csr_vir_base = get_vir_csrbase();

	/* setup PCI host bridge */
	holly_remap_bridge();

	/* NOTE(review): the pci node reference from of_find_node_by_type()
	 * is never dropped with of_node_put() -- minor refcount leak at
	 * boot; confirm whether it is intentional */
	np = of_find_node_by_type(NULL, "pci");
	if (np)
		tsi108_setup_pci(np, HOLLY_PCI_CFG_PHYS, 1);

	ppc_md.pci_exclude_device = holly_exclude_device;
	if (ppc_md.progress)
		ppc_md.progress("tsi108: resources set", 0x100);

	printk(KERN_INFO "PPC750GX/CL Platform\n");
}
/*
* Interrupt setup and service. Interrupts on the holly come
* from the four external INT pins, PCI interrupts are routed via
* PCI interrupt control registers, it generates internal IRQ23
*
* Interrupt routing on the Holly Board:
* TSI108:PB_INT[0] -> CPU0:INT#
* TSI108:PB_INT[1] -> CPU0:MCP#
* TSI108:PB_INT[2] -> N/C
* TSI108:PB_INT[3] -> N/C
*/
static void __init holly_init_IRQ(void)
{
	struct mpic *mpic;
#ifdef CONFIG_PCI
	unsigned int cascade_pci_irq;
	struct device_node *tsi_pci;
	struct device_node *cascade_node = NULL;
#endif

	/* bring up the TSI108's on-chip MPIC (24 sources, big-endian
	 * register set) */
	mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
			MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108,
			24, 0,
			"Tsi108_PIC");

	BUG_ON(mpic == NULL);

	mpic_assign_isu(mpic, 0, mpic->paddr + 0x100);

	mpic_init(mpic);

#ifdef CONFIG_PCI
	/* chain the PCI interrupt router (internal IRQ23) off the MPIC */
	tsi_pci = of_find_node_by_type(NULL, "pci");
	if (tsi_pci == NULL) {
		printk(KERN_ERR "%s: No tsi108 pci node found !\n", __func__);
		return;
	}

	cascade_node = of_find_node_by_type(NULL, "pic-router");
	if (cascade_node == NULL) {
		printk(KERN_ERR "%s: No tsi108 pci cascade node found !\n", __func__);
		return;
	}

	cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0);
	pr_debug("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, (u32) cascade_pci_irq);
	tsi108_pci_int_init(cascade_node);
	irq_set_handler_data(cascade_pci_irq, mpic);
	irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
#endif
	/* Configure MPIC outputs to CPU0 */
	tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
}
/*
 * /proc/cpuinfo hook: report the board vendor and machine name.
 * Both strings are constant, so use seq_puts instead of seq_printf
 * (no format parsing needed).
 */
void holly_show_cpuinfo(struct seq_file *m)
{
	seq_puts(m, "vendor\t\t: IBM\n");
	seq_puts(m, "machine\t\t: PPC750 GX/CL\n");
}
/*
 * Restart the board by switching the TSI108's boot routing back on and
 * jumping to the firmware reset vector via rfi.  Never returns.
 */
void holly_restart(char *cmd)
{
	__be32 __iomem *ocn_bar1 = NULL;
	unsigned long bar;
	struct device_node *bridge = NULL;
	const void *prop;
	int size;
	phys_addr_t	addr = 0xc0000000;	/* fallback bridge base */

	local_irq_disable();

	/* NOTE(review): the bridge node reference is never put back, but
	 * this function does not return, so the leak is moot */
	bridge = of_find_node_by_type(NULL, "tsi-bridge");
	if (bridge) {
		prop = of_get_property(bridge, "reg", &size);
		addr = of_translate_address(bridge, prop);
	}
	addr += (TSI108_PB_OFFSET + 0x414);

	ocn_bar1 = ioremap(addr, 0x4);

	/* Turn on the BOOT bit so the addresses are correctly
	 * routed to the HLP interface */
	bar = ioread32be(ocn_bar1);
	bar |= 2;
	iowrite32be(bar, ocn_bar1);
	iosync();

	/* Set SRR0 to the reset vector and turn on MSR_IP */
	mtspr(SPRN_SRR0, 0xfff00100);
	mtspr(SPRN_SRR1, MSR_IP);

	/* Do an rfi to jump back to firmware.  Somewhat evil,
	 * but it works
	 */
	__asm__ __volatile__("rfi" : : : "memory");

	/* Spin until reset happens.  Shouldn't really get here */
	for (;;) ;
}
/* The board has no software power control, so just park the CPU. */
void holly_power_off(void)
{
	local_irq_disable();
	/* No way to shut power off with software */
	for (;;) ;
}

/* Halt is indistinguishable from power-off here. */
void holly_halt(void)
{
	holly_power_off();
}
/*
* Called very early, device-tree isn't unflattened
*/
static int __init holly_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	/* claim the machine only when the flat device tree matches */
	return of_flat_dt_is_compatible(root, "ibm,holly") ? 1 : 0;
}
/*
 * Machine-check handler: a fault is recoverable only when an exception
 * table fixup covers the faulting instruction (e.g. a PCI config-space
 * access that timed out).  Clear the bridge's error state, mark the
 * exception recoverable and resume at the fixup address.
 */
static int ppc750_machine_check_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->nip);
	if (!entry)
		return 0;

	tsi108_clear_pci_cfg_error();
	regs->msr |= MSR_RI;
	regs->nip = entry->fixup;
	return 1;
}
/* machine description registered with the powerpc ppc_md machinery */
define_machine(holly){
	.name				= "PPC750 GX/CL TSI",
	.probe				= holly_probe,
	.setup_arch			= holly_setup_arch,
	.init_IRQ			= holly_init_IRQ,
	.show_cpuinfo			= holly_show_cpuinfo,
	.get_irq			= mpic_get_irq,
	.restart			= holly_restart,
	.calibrate_decr			= generic_calibrate_decr,
	.machine_check_exception	= ppc750_machine_check_exception,
	.progress			= udbg_progress,
};
| gpl-2.0 |
Team-Cody/android_kernel_htc_pico | arch/mips/txx9/generic/smsc_fdc37m81x.c | 8794 | 4978 | /*
* Interface for smsc fdc48m81x Super IO chip
*
* Author: MontaVista Software, Inc. source@mvista.com
*
* 2001-2003 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*
* Copyright 2004 (c) MontaVista Software, Inc.
*/
#include <linux/init.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/txx9/smsc_fdc37m81x.h>
/* Common Registers */
#define SMSC_FDC37M81X_CONFIG_INDEX 0x00
#define SMSC_FDC37M81X_CONFIG_DATA 0x01
#define SMSC_FDC37M81X_CONF 0x02
#define SMSC_FDC37M81X_INDEX 0x03
#define SMSC_FDC37M81X_DNUM 0x07
#define SMSC_FDC37M81X_DID 0x20
#define SMSC_FDC37M81X_DREV 0x21
#define SMSC_FDC37M81X_PCNT 0x22
#define SMSC_FDC37M81X_PMGT 0x23
#define SMSC_FDC37M81X_OSC 0x24
#define SMSC_FDC37M81X_CONFPA0 0x26
#define SMSC_FDC37M81X_CONFPA1 0x27
#define SMSC_FDC37M81X_TEST4 0x2B
#define SMSC_FDC37M81X_TEST5 0x2C
#define SMSC_FDC37M81X_TEST1 0x2D
#define SMSC_FDC37M81X_TEST2 0x2E
#define SMSC_FDC37M81X_TEST3 0x2F
/* Logical device numbers */
#define SMSC_FDC37M81X_FDD 0x00
#define SMSC_FDC37M81X_SERIAL1 0x04
#define SMSC_FDC37M81X_SERIAL2 0x05
#define SMSC_FDC37M81X_KBD 0x07
/* Logical device Config Registers */
#define SMSC_FDC37M81X_ACTIVE 0x30
#define SMSC_FDC37M81X_BASEADDR0 0x60
#define SMSC_FDC37M81X_BASEADDR1 0x61
#define SMSC_FDC37M81X_INT 0x70
#define SMSC_FDC37M81X_INT2 0x72
#define SMSC_FDC37M81X_MODE 0xF0
/* Chip Config Values */
#define SMSC_FDC37M81X_CONFIG_ENTER 0x55
#define SMSC_FDC37M81X_CONFIG_EXIT 0xaa
#define SMSC_FDC37M81X_CHIP_ID 0x4d
static unsigned long g_smsc_fdc37m81x_base;
/*
 * Read one configuration register: write the register index to the
 * index port, then read the value back from the data port.
 */
static inline unsigned char smsc_fdc37m81x_rd(unsigned char index)
{
	outb(index, g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_INDEX);

	return inb(g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_DATA);
}
/*
 * Write one configuration register via the index/data port pair.
 * NOTE(review): the name is missing the "f" ("dc" vs "fdc") — kept
 * as-is because callers in this file use this spelling.
 */
static inline void smsc_dc37m81x_wr(unsigned char index, unsigned char data)
{
	outb(index, g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_INDEX);
	outb(data, g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_DATA);
}
/*
 * Enter configuration mode by writing the enter key to the index port.
 * No-op if the chip was never successfully probed.
 */
void smsc_fdc37m81x_config_beg(void)
{
	if (!g_smsc_fdc37m81x_base)
		return;

	outb(SMSC_FDC37M81X_CONFIG_ENTER,
	     g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_INDEX);
}
/*
 * Leave configuration mode by writing the exit key to the index port.
 * No-op if the chip was never successfully probed.
 */
void smsc_fdc37m81x_config_end(void)
{
	if (!g_smsc_fdc37m81x_base)
		return;

	outb(SMSC_FDC37M81X_CONFIG_EXIT,
	     g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_INDEX);
}
/*
 * Read a configuration register.  Returns 0 when no chip has been
 * registered (indistinguishable from a register that reads as 0).
 */
u8 smsc_fdc37m81x_config_get(u8 reg)
{
	if (!g_smsc_fdc37m81x_base)
		return 0;

	return smsc_fdc37m81x_rd(reg);
}
/* Write a configuration register; silently ignored with no chip. */
void smsc_fdc37m81x_config_set(u8 reg, u8 val)
{
	if (!g_smsc_fdc37m81x_base)
		return;

	smsc_dc37m81x_wr(reg, val);
}
/*
 * Probe the Super I/O chip at @port: enter config mode, check the
 * device ID, and record the base address on success.  Returns the
 * registered base address, or 0 if no matching chip answered.
 */
unsigned long __init smsc_fdc37m81x_init(unsigned long port)
{
	const int field = sizeof(unsigned long) * 2;	/* hex digits in a long */
	u8 chip_id;

	if (g_smsc_fdc37m81x_base)
		printk(KERN_WARNING "%s: stepping on old base=0x%0*lx\n",
		       __func__,
		       field, g_smsc_fdc37m81x_base);

	g_smsc_fdc37m81x_base = port;

	smsc_fdc37m81x_config_beg();

	chip_id = smsc_fdc37m81x_rd(SMSC_FDC37M81X_DID);
	if (chip_id == SMSC_FDC37M81X_CHIP_ID)
		smsc_fdc37m81x_config_end();
	else {
		/* NOTE(review): config mode is never exited on an ID
		 * mismatch — presumably harmless since no chip answered,
		 * but confirm against the FDC37M81x datasheet. */
		printk(KERN_WARNING "%s: unknown chip id 0x%02x\n", __func__,
		       chip_id);
		g_smsc_fdc37m81x_base = 0;
	}

	return g_smsc_fdc37m81x_base;
}
#ifdef DEBUG
/*
 * Dump one config register.  @dev is printed only for context; the
 * caller must already have selected the logical device via DNUM.
 */
static void smsc_fdc37m81x_config_dump_one(const char *key, u8 dev, u8 reg)
{
	printk(KERN_INFO "%s: dev=0x%02x reg=0x%02x val=0x%02x\n",
	       key, dev, reg,
	       smsc_fdc37m81x_rd(reg));
}
/*
 * Dump the common config registers plus the keyboard logical device.
 * Saves and restores the selected logical device (DNUM) around the
 * dump.  SMSC_FDC37M81X_NONE and SMSC_FDC37M81X_LDCR_F0 come from the
 * header — presumably a "no device" marker and the 0xF0 mode register;
 * confirm in <asm/txx9/smsc_fdc37m81x.h>.
 */
void smsc_fdc37m81x_config_dump(void)
{
	u8 orig;
	const char *fname = __func__;

	smsc_fdc37m81x_config_beg();

	/* Remember which logical device was selected on entry. */
	orig = smsc_fdc37m81x_rd(SMSC_FDC37M81X_DNUM);

	printk(KERN_INFO "%s: common\n", fname);
	smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE,
				       SMSC_FDC37M81X_DNUM);
	smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE,
				       SMSC_FDC37M81X_DID);
	smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE,
				       SMSC_FDC37M81X_DREV);
	smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE,
				       SMSC_FDC37M81X_PCNT);
	smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE,
				       SMSC_FDC37M81X_PMGT);

	printk(KERN_INFO "%s: keyboard\n", fname);
	/* Select the keyboard logical device before reading its regs. */
	smsc_dc37m81x_wr(SMSC_FDC37M81X_DNUM, SMSC_FDC37M81X_KBD);
	smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_KBD,
				       SMSC_FDC37M81X_ACTIVE);
	smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_KBD,
				       SMSC_FDC37M81X_INT);
	smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_KBD,
				       SMSC_FDC37M81X_INT2);
	smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_KBD,
				       SMSC_FDC37M81X_LDCR_F0);

	/* Restore the originally selected logical device. */
	smsc_dc37m81x_wr(SMSC_FDC37M81X_DNUM, orig);

	smsc_fdc37m81x_config_end();
}
#endif
| gpl-2.0 |
OpenSEMC/android_kernel_sony_msm8x60 | arch/mips/powertv/reset.c | 10842 | 1425 | /*
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
* Portions copyright (C) 2009 Cisco Systems, Inc.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
#include <linux/pm.h>
#include <linux/io.h>
#include <asm/reboot.h> /* Not included by linux/reboot.h */
#ifdef CONFIG_BOOTLOADER_DRIVER
#include <asm/mach-powertv/kbldr.h>
#endif
#include <asm/mach-powertv/asic_regs.h>
#include "reset.h"
/*
 * Restart the box.  @command is accepted for the _machine_restart
 * signature but unused.  With a bootloader driver the reset goes
 * through the bootloader so persistent data is flushed; otherwise we
 * kick the ASIC watchdog directly.
 */
static void mips_machine_restart(char *command)
{
#ifdef CONFIG_BOOTLOADER_DRIVER
	/*
	 * Call the bootloader's reset function to ensure
	 * that persistent data is flushed before hard reset
	 */
	kbldr_SetCauseAndReset();
#else
	writel(0x1, asic_reg_addr(watchdog));
#endif
}
/* Install the platform restart hook into the generic MIPS reboot path. */
void mips_reboot_setup(void)
{
	_machine_restart = mips_machine_restart;
}
| gpl-2.0 |
treejames/exynos4_uboot | common/env_nowhere.c | 91 | 1501 | /*
* (C) Copyright 2000-2010
* Wolfgang Denk, DENX Software Engineering, wd@denx.de.
*
* (C) Copyright 2001 Sysgo Real-Time Solutions, GmbH <www.elinos.com>
* Andreas Heppel <aheppel@sysgo.de>
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <common.h>
#include <command.h>
#include <environment.h>
#include <linux/stddef.h>
DECLARE_GLOBAL_DATA_PTR;
env_t *env_ptr = NULL;
extern uchar default_environment[];
/*
 * Nothing to relocate: with ENV_IS_NOWHERE the environment is only
 * ever the compiled-in defaults.
 */
void env_relocate_spec(void)
{
}
uchar env_get_char_spec(int index)
{
return ( *((uchar *)(gd->env_addr + index)) );
}
/*
* Initialize Environment use
*
* We are still running from ROM, so data use is limited
*/
int env_init(void)
{
gd->env_addr = (ulong)&default_environment[0];
gd->env_valid = 0;
return (0);
}
| gpl-2.0 |
manisabri/linux-rt-rpi | fs/xfs/libxfs/xfs_bmap_btree.c | 347 | 22957 | /*
* Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_dinode.h"
/*
 * Determine the extent state: a record with the extent flag set is an
 * unwritten (preallocated) extent, everything else is a normal one.
 */
/* ARGSUSED */
STATIC xfs_exntst_t
xfs_extent_state(
	xfs_filblks_t		blks,
	int			extent_flag)
{
	if (!extent_flag)
		return XFS_EXT_NORM;

	ASSERT(blks != 0);	/* saved for DMIG */
	return XFS_EXT_UNWRITTEN;
}
/*
 * Convert on-disk form of btree root to in-memory form: initialize the
 * in-memory root block header (CRC or non-CRC flavor depending on the
 * superblock), then copy level, record count, keys and child pointers
 * across from the dinode-embedded root.
 */
void
xfs_bmdr_to_bmbt(
	struct xfs_inode	*ip,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen,
	struct xfs_btree_block	*rblock,
	int			rblocklen)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			dmxr;
	xfs_bmbt_key_t		*fkp;
	__be64			*fpp;
	xfs_bmbt_key_t		*tkp;
	__be64			*tpp;

	/* CRC filesystems use a different magic and carry extra header
	 * fields (owner, blkno, uuid). */
	if (xfs_sb_version_hascrc(&mp->m_sb))
		xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
				 XFS_BMAP_CRC_MAGIC, 0, 0, ip->i_ino,
				 XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
	else
		xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
				 XFS_BMAP_MAGIC, 0, 0, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);

	rblock->bb_level = dblock->bb_level;
	/* A root in bmdr form is never a leaf. */
	ASSERT(be16_to_cpu(rblock->bb_level) > 0);
	rblock->bb_numrecs = dblock->bb_numrecs;

	/* First compute addresses with the disk root's max record count,
	 * then copy only the records actually present. */
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}
/*
 * Convert a compressed bmap extent record to an uncompressed form.
 * This code must be in sync with the routines xfs_bmbt_get_startoff,
 * xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state.
 *
 * Packing (derived from the shifts/masks below):
 *   l0 = [1-bit extent flag][54-bit startoff][9 high bits of startblock]
 *   l1 = [43 low bits of startblock][21-bit blockcount]
 */
STATIC void
__xfs_bmbt_get_all(
		__uint64_t l0,
		__uint64_t l1,
		xfs_bmbt_irec_t  *s)
{
	int	ext_flag;
	xfs_exntst_t st;

	ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
	s->br_startoff = ((xfs_fileoff_t)l0 &
			   xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
	s->br_startblock = (((xfs_fsblock_t)l0 & xfs_mask64lo(9)) << 43) |
			   (((xfs_fsblock_t)l1) >> 21);
	s->br_blockcount = (xfs_filblks_t)(l1 & xfs_mask64lo(21));
	/* This is xfs_extent_state() in-line */
	if (ext_flag) {
		ASSERT(s->br_blockcount != 0);	/* saved for DMIG */
		st = XFS_EXT_UNWRITTEN;
	} else
		st = XFS_EXT_NORM;
	s->br_state = st;
}
/* Expand an in-memory (host-endian) extent record into irec form. */
void
xfs_bmbt_get_all(
	xfs_bmbt_rec_host_t *r,
	xfs_bmbt_irec_t *s)
{
	__xfs_bmbt_get_all(r->l0, r->l1, s);
}
/*
 * Extract the blockcount field from an in memory bmap extent record.
 * (Low 21 bits of l1.)
 */
xfs_filblks_t
xfs_bmbt_get_blockcount(
	xfs_bmbt_rec_host_t	*r)
{
	return (xfs_filblks_t)(r->l1 & xfs_mask64lo(21));
}
/*
 * Extract the startblock field from an in memory bmap extent record.
 * (9 low bits of l0 are the high bits; 43 high bits of l1 are the rest.)
 */
xfs_fsblock_t
xfs_bmbt_get_startblock(
	xfs_bmbt_rec_host_t	*r)
{
	return (((xfs_fsblock_t)r->l0 & xfs_mask64lo(9)) << 43) |
	       (((xfs_fsblock_t)r->l1) >> 21);
}
/*
 * Extract the startoff field from an in memory bmap extent record.
 * (Bits 62..9 of l0, i.e. everything below the extent flag.)
 */
xfs_fileoff_t
xfs_bmbt_get_startoff(
	xfs_bmbt_rec_host_t	*r)
{
	return ((xfs_fileoff_t)r->l0 &
		 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}
/* Return the extent state (normal/unwritten) from the top bit of l0. */
xfs_exntst_t
xfs_bmbt_get_state(
	xfs_bmbt_rec_host_t	*r)
{
	int	ext_flag;

	ext_flag = (int)((r->l0) >> (64 - BMBT_EXNTFLAG_BITLEN));
	return xfs_extent_state(xfs_bmbt_get_blockcount(r),
				ext_flag);
}
/*
 * Extract the blockcount field from an on disk bmap extent record.
 * Same layout as the host form, but l1 is big-endian on disk.
 */
xfs_filblks_t
xfs_bmbt_disk_get_blockcount(
	xfs_bmbt_rec_t	*r)
{
	return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
}
/*
 * Extract the startoff field from a disk format bmap extent record.
 * Same layout as the host form, but l0 is big-endian on disk.
 */
xfs_fileoff_t
xfs_bmbt_disk_get_startoff(
	xfs_bmbt_rec_t	*r)
{
	return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
		 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}
/*
 * Set all the fields in a bmap extent record from the arguments.
 * Packs the fields into the two 64-bit words (see __xfs_bmbt_get_all
 * for the layout); the asserts verify each value fits its bit field.
 */
void
xfs_bmbt_set_allf(
	xfs_bmbt_rec_host_t	*r,
	xfs_fileoff_t		startoff,
	xfs_fsblock_t		startblock,
	xfs_filblks_t		blockcount,
	xfs_exntst_t		state)
{
	int		extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;

	ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
	ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
	ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
	ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);

	r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
		((xfs_bmbt_rec_base_t)startoff << 9) |
		((xfs_bmbt_rec_base_t)startblock >> 43);
	r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
		((xfs_bmbt_rec_base_t)blockcount &
		(xfs_bmbt_rec_base_t)xfs_mask64lo(21));
}
/*
 * Set all the fields in a bmap extent record from the uncompressed form.
 */
void
xfs_bmbt_set_all(
	xfs_bmbt_rec_host_t *r,
	xfs_bmbt_irec_t	*s)
{
	xfs_bmbt_set_allf(r, s->br_startoff, s->br_startblock,
			     s->br_blockcount, s->br_state);
}
/*
 * Set all the fields in a disk format bmap extent record from the
 * arguments.  Identical packing to xfs_bmbt_set_allf, but the two
 * words are stored big-endian for the on-disk format.
 */
void
xfs_bmbt_disk_set_allf(
	xfs_bmbt_rec_t		*r,
	xfs_fileoff_t		startoff,
	xfs_fsblock_t		startblock,
	xfs_filblks_t		blockcount,
	xfs_exntst_t		state)
{
	int			extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;

	ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
	ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
	ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
	ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);

	r->l0 = cpu_to_be64(
		((xfs_bmbt_rec_base_t)extent_flag << 63) |
		 ((xfs_bmbt_rec_base_t)startoff << 9) |
		 ((xfs_bmbt_rec_base_t)startblock >> 43));
	r->l1 = cpu_to_be64(
		((xfs_bmbt_rec_base_t)startblock << 21) |
		 ((xfs_bmbt_rec_base_t)blockcount &
		  (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
}
/*
 * Set all the fields in a disk format bmap extent record from the
 * uncompressed form.
 */
STATIC void
xfs_bmbt_disk_set_all(
	xfs_bmbt_rec_t	*r,
	xfs_bmbt_irec_t *s)
{
	xfs_bmbt_disk_set_allf(r, s->br_startoff, s->br_startblock,
				  s->br_blockcount, s->br_state);
}
/*
 * Set the blockcount field in a bmap extent record: replace the low
 * 21 bits of l1, preserving the startblock bits above them.
 */
void
xfs_bmbt_set_blockcount(
	xfs_bmbt_rec_host_t *r,
	xfs_filblks_t	v)
{
	ASSERT((v & xfs_mask64hi(43)) == 0);
	r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64hi(43)) |
		  (xfs_bmbt_rec_base_t)(v & xfs_mask64lo(21));
}
/*
 * Set the startblock field in a bmap extent record.  The value spans
 * both words: its top 9 bits land in the low bits of l0, the rest in
 * the high 43 bits of l1.
 */
void
xfs_bmbt_set_startblock(
	xfs_bmbt_rec_host_t *r,
	xfs_fsblock_t	v)
{
	ASSERT((v & xfs_mask64hi(12)) == 0);
	r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64hi(55)) |
		  (xfs_bmbt_rec_base_t)(v >> 43);
	r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)) |
		  (xfs_bmbt_rec_base_t)(v << 21);
}
/*
 * Set the startoff field in a bmap extent record: replace bits 62..9
 * of l0, preserving the extent flag above and the startblock bits
 * below.
 */
void
xfs_bmbt_set_startoff(
	xfs_bmbt_rec_host_t *r,
	xfs_fileoff_t	v)
{
	ASSERT((v & xfs_mask64hi(9)) == 0);
	r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) xfs_mask64hi(1)) |
		((xfs_bmbt_rec_base_t)v << 9) |
		  (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64lo(9));
}
/*
 * Set the extent state field in a bmap extent record (the single top
 * bit of l0: clear for normal, set for unwritten).
 */
void
xfs_bmbt_set_state(
	xfs_bmbt_rec_host_t *r,
	xfs_exntst_t	v)
{
	ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN);
	if (v == XFS_EXT_NORM)
		r->l0 &= xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN);
	else
		r->l0 |= xfs_mask64hi(BMBT_EXNTFLAG_BITLEN);
}
/*
 * Convert in-memory form of btree root to on-disk form: verify the
 * in-memory root header is sane for the filesystem flavor, then copy
 * level, record count, keys and child pointers into the
 * dinode-embedded (bmdr) root.  Inverse of xfs_bmdr_to_bmbt.
 */
void
xfs_bmbt_to_bmdr(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*rblock,
	int			rblocklen,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen)
{
	int			dmxr;
	xfs_bmbt_key_t		*fkp;
	__be64			*fpp;
	xfs_bmbt_key_t		*tkp;
	__be64			*tpp;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_CRC_MAGIC));
		ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid, &mp->m_sb.sb_uuid));
		ASSERT(rblock->bb_u.l.bb_blkno ==
		       cpu_to_be64(XFS_BUF_DADDR_NULL));
	} else
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC));
	/* The root has no siblings and is never a leaf. */
	ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_level != 0);
	dblock->bb_level = rblock->bb_level;
	dblock->bb_numrecs = rblock->bb_numrecs;

	/* Address with the disk root's max record count, copy only the
	 * records actually present. */
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}
/*
 * Check extent records, which have just been read, for
 * any bit in the extent flag field. ASSERT on debug
 * kernels, as this condition should not occur.
 * Return an error condition (1) if any flags found,
 * otherwise return 0.
 */
int
xfs_check_nostate_extents(
	xfs_ifork_t		*ifp,
	xfs_extnum_t		idx,
	xfs_extnum_t		num)
{
	/* Walk records idx .. idx+num-1 in the incore extent list. */
	for (; num > 0; num--, idx++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
		/* Top BMBT_EXNTFLAG_BITLEN bits of l0 hold the flag. */
		if ((ep->l0 >>
		     (64 - BMBT_EXNTFLAG_BITLEN)) != 0) {
			ASSERT(0);
			return 1;
		}
	}
	return 0;
}
/* Duplicate a bmap btree cursor (btree_ops dup_cursor hook). */
STATIC struct xfs_btree_cur *
xfs_bmbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	struct xfs_btree_cur	*new;

	new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.b.ip, cur->bc_private.b.whichfork);

	/*
	 * Copy the firstblock, flist, and flags values,
	 * since init cursor doesn't get them.
	 */
	new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
	new->bc_private.b.flist = cur->bc_private.b.flist;
	new->bc_private.b.flags = cur->bc_private.b.flags;

	return new;
}
/*
 * Merge allocation state from @src back into @dst after a btree
 * operation: accumulate the allocated-block count and carry over the
 * first allocated block (btree_ops update_cursor hook).
 */
STATIC void
xfs_bmbt_update_cursor(
	struct xfs_btree_cur	*src,
	struct xfs_btree_cur	*dst)
{
	ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) ||
	       (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
	ASSERT(dst->bc_private.b.flist == src->bc_private.b.flist);

	dst->bc_private.b.allocated += src->bc_private.b.allocated;
	dst->bc_private.b.firstblock = src->bc_private.b.firstblock;

	src->bc_private.b.allocated = 0;
}
/*
 * Allocate a new block for the bmap btree (btree_ops alloc_block
 * hook).  On success *new holds the new block number and *stat is 1;
 * *stat is 0 if no space could be found.  Updates the cursor's
 * firstblock/allocated bookkeeping and the inode's block count.
 */
STATIC int
xfs_bmbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	xfs_alloc_arg_t		args;		/* block allocation args */
	int			error;		/* error return value */

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.fsbno = cur->bc_private.b.firstblock;
	args.firstblock = args.fsbno;

	if (args.fsbno == NULLFSBLOCK) {
		/* First allocation in this transaction: start near the
		 * caller-suggested block. */
		args.fsbno = be64_to_cpu(start->l);
		args.type = XFS_ALLOCTYPE_START_BNO;
		/*
		 * Make sure there is sufficient room left in the AG to
		 * complete a full tree split for an extent insert.  If
		 * we are converting the middle part of an extent then
		 * we may need space for two tree splits.
		 *
		 * We are relying on the caller to make the correct block
		 * reservation for this operation to succeed.  If the
		 * reservation amount is insufficient then we may fail a
		 * block allocation here and corrupt the filesystem.
		 */
		args.minleft = xfs_trans_get_block_res(args.tp);
	} else if (cur->bc_private.b.flist->xbf_low) {
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
	if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) {
		error = -ENOSPC;
		goto error0;
	}
	error = xfs_alloc_vextent(&args);
	if (error)
		goto error0;

	if (args.fsbno == NULLFSBLOCK && args.minleft) {
		/*
		 * Could not find an AG with enough free space to satisfy
		 * a full btree split.  Try again without minleft and if
		 * successful activate the lowspace algorithm.
		 */
		args.fsbno = 0;
		args.type = XFS_ALLOCTYPE_FIRST_AG;
		args.minleft = 0;
		error = xfs_alloc_vextent(&args);
		if (error)
			goto error0;
		cur->bc_private.b.flist->xbf_low = 1;
	}
	if (args.fsbno == NULLFSBLOCK) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}
	ASSERT(args.len == 1);
	cur->bc_private.b.firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	cur->bc_private.b.ip->i_d.di_nblocks++;
	xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
	/* Charge one block to the inode's quota. */
	xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
			XFS_TRANS_DQ_BCOUNT, 1L);

	new->l = cpu_to_be64(args.fsbno);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

 error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
/*
 * Free a bmap btree block (btree_ops free_block hook): queue the block
 * on the free list, drop the inode block count and quota charge, and
 * invalidate the buffer in the transaction.
 */
STATIC int
xfs_bmbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_private.b.ip;
	struct xfs_trans	*tp = cur->bc_tp;
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));

	xfs_bmap_add_free(fsbno, 1, cur->bc_private.b.flist, mp);
	ip->i_d.di_nblocks--;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, bp);
	return 0;
}
/*
 * Minimum records per block at @level.  The root (top level) lives in
 * the inode fork, so its minimum is half of what the fork can hold;
 * other levels use the mount's precomputed table.
 */
STATIC int
xfs_bmbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp;

		ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
				    cur->bc_private.b.whichfork);

		return xfs_bmbt_maxrecs(cur->bc_mp,
					ifp->if_broot_bytes, level == 0) / 2;
	}

	return cur->bc_mp->m_bmap_dmnr[level != 0];
}
/*
 * Maximum records per block at @level.  The root is sized by the
 * current in-core fork buffer; other levels use the mount's table.
 */
int
xfs_bmbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp;

		ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
				    cur->bc_private.b.whichfork);

		return xfs_bmbt_maxrecs(cur->bc_mp,
					ifp->if_broot_bytes, level == 0);
	}

	return cur->bc_mp->m_bmap_dmxr[level != 0];
}
/*
 * Get the maximum records we could store in the on-disk format.
 *
 * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
 * for the root node this checks the available space in the dinode fork
 * so that we can resize the in-memory buffer to match it.  After a
 * resize to the maximum size this function returns the same value
 * as xfs_bmbt_get_maxrecs for the root node, too.
 */
STATIC int
xfs_bmbt_get_dmaxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1)
		return xfs_bmdr_maxrecs(cur->bc_private.b.forksize,
					level == 0);
	return cur->bc_mp->m_bmap_dmxr[level != 0];
}
/* Build a btree key (startoff only) from an on-disk extent record. */
STATIC void
xfs_bmbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->bmbt.br_startoff =
		cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
}
/*
 * Build a minimal on-disk record from a key: only startoff is
 * meaningful (zero startblock/blockcount, normal state).
 */
STATIC void
xfs_bmbt_init_rec_from_key(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	ASSERT(key->bmbt.br_startoff != 0);

	xfs_bmbt_disk_set_allf(&rec->bmbt, be64_to_cpu(key->bmbt.br_startoff),
			       0, 0, XFS_EXT_NORM);
}
/* Build an on-disk record from the cursor's in-core irec. */
STATIC void
xfs_bmbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
}
/* The bmap btree root lives in the inode, so the root pointer is 0. */
STATIC void
xfs_bmbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	ptr->l = 0;
}
/*
 * Compare @key against the cursor's record: <0, 0 or >0 as the key's
 * startoff is below, equal to, or above the cursor's startoff.
 */
STATIC __int64_t
xfs_bmbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	return (__int64_t)be64_to_cpu(key->bmbt.br_startoff) -
				      cur->bc_rec.b.br_startoff;
}
/*
 * Structural verification of a bmap btree block buffer: magic number
 * (with extra CRC-header checks for v5 filesystems), level and record
 * count bounds, and sibling pointer sanity.  Returns false on any
 * inconsistency.
 */
static bool
xfs_bmbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	unsigned int		level;

	switch (block->bb_magic) {
	case cpu_to_be32(XFS_BMAP_CRC_MAGIC):
		/* CRC magic requires a v5 superblock and matching
		 * metadata uuid / block number. */
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			return false;
		if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_uuid))
			return false;
		if (be64_to_cpu(block->bb_u.l.bb_blkno) != bp->b_bn)
			return false;
		/*
		 * XXX: need a better way of verifying the owner here. Right now
		 * just make sure there has been one set.
		 */
		if (be64_to_cpu(block->bb_u.l.bb_owner) == 0)
			return false;
		/* fall through */
	case cpu_to_be32(XFS_BMAP_MAGIC):
		break;
	default:
		return false;
	}

	/*
	 * numrecs and level verification.
	 *
	 * We don't know what fork we belong to, so just verify that the level
	 * is less than the maximum of the two. Later checks will be more
	 * precise.
	 */
	level = be16_to_cpu(block->bb_level);
	if (level > max(mp->m_bm_maxlevels[0], mp->m_bm_maxlevels[1]))
		return false;
	if (be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
		return false;

	/* sibling pointer verification */
	if (!block->bb_u.l.bb_leftsib ||
	    (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
	     !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_leftsib))))
		return false;
	if (!block->bb_u.l.bb_rightsib ||
	    (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
	     !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_rightsib))))
		return false;

	return true;
}
/*
 * Buffer read verifier: check the CRC first, then the block structure;
 * record the appropriate error on the buffer and report corruption.
 */
static void
xfs_bmbt_read_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_btree_lblock_verify_crc(bp))
		xfs_buf_ioerror(bp, -EFSBADCRC);
	else if (!xfs_bmbt_verify(bp))
		xfs_buf_ioerror(bp, -EFSCORRUPTED);

	if (bp->b_error) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp);
	}
}
/*
 * Buffer write verifier: refuse to write a structurally invalid block;
 * otherwise stamp the CRC just before the buffer goes to disk.
 */
static void
xfs_bmbt_write_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_bmbt_verify(bp)) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}
	xfs_btree_lblock_calc_crc(bp);
}
/* I/O verifier pair attached to every bmap btree buffer. */
const struct xfs_buf_ops xfs_bmbt_buf_ops = {
	.verify_read = xfs_bmbt_read_verify,
	.verify_write = xfs_bmbt_write_verify,
};
#if defined(DEBUG) || defined(XFS_WARN)
/* Debug check: keys must be strictly increasing by startoff. */
STATIC int
xfs_bmbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be64_to_cpu(k1->bmbt.br_startoff) <
		be64_to_cpu(k2->bmbt.br_startoff);
}
/* Debug check: record r1 must end at or before record r2 starts. */
STATIC int
xfs_bmbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
		xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
		xfs_bmbt_disk_get_startoff(&r2->bmbt);
}
#endif /* DEBUG */
/* Operations vector wiring the generic btree code to the bmap btree. */
static const struct xfs_btree_ops xfs_bmbt_ops = {
	.rec_len		= sizeof(xfs_bmbt_rec_t),
	.key_len		= sizeof(xfs_bmbt_key_t),

	.dup_cursor		= xfs_bmbt_dup_cursor,
	.update_cursor		= xfs_bmbt_update_cursor,
	.alloc_block		= xfs_bmbt_alloc_block,
	.free_block		= xfs_bmbt_free_block,
	.get_maxrecs		= xfs_bmbt_get_maxrecs,
	.get_minrecs		= xfs_bmbt_get_minrecs,
	.get_dmaxrecs		= xfs_bmbt_get_dmaxrecs,
	.init_key_from_rec	= xfs_bmbt_init_key_from_rec,
	.init_rec_from_key	= xfs_bmbt_init_rec_from_key,
	.init_rec_from_cur	= xfs_bmbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_bmbt_init_ptr_from_cur,
	.key_diff		= xfs_bmbt_key_diff,
	.buf_ops		= &xfs_bmbt_buf_ops,
#if defined(DEBUG) || defined(XFS_WARN)
	.keys_inorder		= xfs_bmbt_keys_inorder,
	.recs_inorder		= xfs_bmbt_recs_inorder,
#endif
};
/*
 * Allocate a new bmap btree cursor for the given inode fork, fully
 * initialized against xfs_bmbt_ops.  Allocation-related private state
 * (firstblock/flist/allocated/flags) starts empty; callers such as
 * xfs_bmbt_dup_cursor copy those in afterwards.
 */
struct xfs_btree_cur *				/* new bmap btree cursor */
xfs_bmbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* inode owning the btree */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur;

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	/* Root level + 1 = number of levels in the tree. */
	cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
	cur->bc_btnum = XFS_BTNUM_BMAP;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	cur->bc_ops = &xfs_bmbt_ops;
	cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
	cur->bc_private.b.ip = ip;
	cur->bc_private.b.firstblock = NULLFSBLOCK;
	cur->bc_private.b.flist = NULL;
	cur->bc_private.b.allocated = 0;
	cur->bc_private.b.flags = 0;
	cur->bc_private.b.whichfork = whichfork;

	return cur;
}
/*
 * Calculate number of records in a bmap btree block: the payload space
 * left after the block header, divided by the per-record footprint
 * (record for leaves, key + pointer for nodes).
 */
int
xfs_bmbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	int			len = blocklen - XFS_BMBT_BLOCK_LEN(mp);

	if (leaf)
		return len / sizeof(xfs_bmbt_rec_t);
	return len / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
}
/*
 * Calculate number of records in a bmap btree inode root: same scheme
 * as xfs_bmbt_maxrecs, but with the smaller bmdr header and the bmdr
 * record/key/pointer sizes.
 */
int
xfs_bmdr_maxrecs(
	int			blocklen,
	int			leaf)
{
	int			len = blocklen - sizeof(xfs_bmdr_block_t);

	if (leaf)
		return len / sizeof(xfs_bmdr_rec_t);
	return len / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t));
}
/*
* Change the owner of a btree format fork fo the inode passed in. Change it to
* the owner of that is passed in so that we can change owners before or after
* we switch forks between inodes. The operation that the caller is doing will
* determine whether is needs to change owner before or after the switch.
*
* For demand paged transactional modification, the fork switch should be done
* after reading in all the blocks, modifying them and pinning them in the
* transaction. For modification when the buffers are already pinned in memory,
* the fork switch can be done before changing the owner as we won't need to
* validate the owner until the btree buffers are unpinned and writes can occur
* again.
*
* For recovery based ownership change, there is no transactional context and
* so a buffer list must be supplied so that we can record the buffers that we
* modified for the caller to issue IO on.
*/
/*
 * Change the recorded owner of every block in the inode's btree-format
 * fork to @new_owner.  Exactly one of @tp (transactional path) or
 * @buffer_list (log-recovery path, caller issues the I/O) must be
 * supplied; the fork must already be in btree format.
 */
int
xfs_bmbt_change_owner(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_ino_t		new_owner,
	struct list_head	*buffer_list)
{
	struct xfs_btree_cur	*cur;
	int			error;

	ASSERT(tp || buffer_list);
	ASSERT(!(tp && buffer_list));
	if (whichfork == XFS_DATA_FORK)
		ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_BTREE);
	else
		ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE);

	cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
	if (!cur)
		return -ENOMEM;

	error = xfs_btree_change_owner(cur, new_owner, buffer_list);
	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	return error;
}
| gpl-2.0 |
gblues/samsung-gravity-smart-froyo | drivers/acpi/scan.c | 347 | 37425 | /*
* scan.c - support for transforming the ACPI namespace into individual objects
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/signal.h>
#include <linux/kthread.h>
#include <acpi/acpi_drivers.h>
#include "internal.h"
#define _COMPONENT ACPI_BUS_COMPONENT
ACPI_MODULE_NAME("scan");
#define STRUCT_TO_INT(s) (*((int*)&s))
extern struct acpi_device *acpi_root;
#define ACPI_BUS_CLASS "system_bus"
#define ACPI_BUS_HID "LNXSYBUS"
#define ACPI_BUS_DEVICE_NAME "System Bus"
#define ACPI_IS_ROOT_DEVICE(device) (!(device)->parent)
static LIST_HEAD(acpi_device_list);
static LIST_HEAD(acpi_bus_id_list);
DEFINE_MUTEX(acpi_device_lock);
LIST_HEAD(acpi_wakeup_device_list);
struct acpi_device_bus_id{
char bus_id[15];
unsigned int instance_no;
struct list_head node;
};
/*
 * Creates hid/cid(s) string needed for modalias and uevent
 * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
 * char *modalias: "acpi:IBM0001:ACPI0001"
 *
 * Returns the string length written, or -EINVAL if @size is too small
 * to hold all ids.
 */
static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
			   int size)
{
	int len;
	int count;
	struct acpi_hardware_id *id;

	len = snprintf(modalias, size, "acpi:");
	size -= len;

	/* Append every hardware id, each followed by a ':'. */
	list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
		count = snprintf(&modalias[len], size, "%s:", id->id);
		if (count < 0 || count >= size)
			return -EINVAL;
		len += count;
		size -= count;
	}

	modalias[len] = '\0';
	return len;
}
/* sysfs "modalias" attribute: the acpi:HID:CID... string plus newline. */
static ssize_t
acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) {
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	int len;

	/* Device has no HID and no CID or string is >1024 */
	len = create_modalias(acpi_dev, buf, 1024);
	if (len <= 0)
		return 0;
	buf[len++] = '\n';
	return len;
}
static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
/*
 * Hotplug worker that removes the ACPI device behind @context (an
 * acpi_handle): trims the device subtree, powers it off via _PS3,
 * releases its lock (_LCK) if lockable, then ejects it with _EJ0.
 */
static void acpi_bus_hot_remove_device(void *context)
{
	struct acpi_device *device;
	acpi_handle handle = context;
	struct acpi_object_list arg_list;
	union acpi_object arg;
	acpi_status status = AE_OK;

	if (acpi_bus_get_device(handle, &device))
		return;

	if (!device)
		return;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"Hot-removing device %s...\n", dev_name(&device->dev)));

	if (acpi_bus_trim(device, 1)) {
		printk(KERN_ERR PREFIX
				"Removing device failed\n");
		return;
	}

	/* power off device */
	status = acpi_evaluate_object(handle, "_PS3", NULL, NULL);
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
		printk(KERN_WARNING PREFIX
				"Power-off device failed\n");

	/* Unlock the device (integer argument 0 = release) if it has
	 * a hardware interlock. */
	if (device->flags.lockable) {
		arg_list.count = 1;
		arg_list.pointer = &arg;
		arg.type = ACPI_TYPE_INTEGER;
		arg.integer.value = 0;
		acpi_evaluate_object(handle, "_LCK", &arg_list, NULL);
	}

	/* _EJ0 takes integer argument 1 = eject. */
	arg_list.count = 1;
	arg_list.pointer = &arg;
	arg.type = ACPI_TYPE_INTEGER;
	arg.integer.value = 1;

	/*
	 * TBD: _EJD support.
	 */
	status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL);
	if (ACPI_FAILURE(status))
		printk(KERN_WARNING PREFIX
				"Eject device failed\n");

	return;
}
/*
 * sysfs "eject" store: writing '1' queues asynchronous hot-removal of
 * the device via acpi_bus_hot_remove_device().  Returns @count on
 * success (removal is merely queued, not completed) or a negative errno.
 */
static ssize_t
acpi_eject_store(struct device *d, struct device_attribute *attr,
		const char *buf, size_t count)
{
	int ret = count;
	acpi_status status;
	acpi_object_type type = 0;
	struct acpi_device *acpi_device = to_acpi_device(d);

	if ((!count) || (buf[0] != '1')) {
		return -EINVAL;
	}
#ifndef FORCE_EJECT
	/* Unless FORCE_EJECT is set, only driver-bound devices may eject. */
	if (acpi_device->driver == NULL) {
		ret = -ENODEV;
		goto err;
	}
#endif
	status = acpi_get_type(acpi_device->handle, &type);
	if (ACPI_FAILURE(status) || (!acpi_device->flags.ejectable)) {
		ret = -ENODEV;
		goto err;
	}

	/* Actual removal runs later from the ACPI hotplug workqueue. */
	acpi_os_hotplug_execute(acpi_bus_hot_remove_device, acpi_device->handle);
err:
	return ret;
}
static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
/* sysfs "hid" attribute: the device's first hardware ID. */
static ssize_t
acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
	return sprintf(buf, "%s\n", acpi_device_hid(to_acpi_device(dev)));
}
static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
/*
 * sysfs "path" attribute: the device's full ACPI namespace pathname.
 * NOTE(review): when acpi_get_name() fails, its raw acpi_status (a
 * positive value) is returned to sysfs as the byte count -- looks
 * wrong, but behavior is preserved here; confirm before changing.
 */
static ssize_t
acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) {
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
	int result;

	result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path);
	if (result)
		goto end;

	result = sprintf(buf, "%s\n", (char*)path.pointer);
	kfree(path.pointer);
end:
	return result;
}
static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
/*
 * Create the per-device sysfs attributes (path, hid, modalias, and --
 * for ejectable devices -- eject).  Returns 0 or the first attribute
 * creation error.
 */
static int acpi_device_setup_files(struct acpi_device *dev)
{
	acpi_status status;
	acpi_handle temp;
	int result = 0;

	/*
	 * Devices gotten from FADT don't have a "path" attribute
	 */
	if (dev->handle) {
		result = device_create_file(&dev->dev, &dev_attr_path);
		if (result)
			goto end;
	}

	result = device_create_file(&dev->dev, &dev_attr_hid);
	if (result)
		goto end;

	result = device_create_file(&dev->dev, &dev_attr_modalias);
	if (result)
		goto end;

	/*
	 * If device has _EJ0, 'eject' file is created that is used to trigger
	 * hot-removal function from userland.
	 */
	status = acpi_get_handle(dev->handle, "_EJ0", &temp);
	if (ACPI_SUCCESS(status))
		result = device_create_file(&dev->dev, &dev_attr_eject);
end:
	return result;
}
/*
 * Remove the sysfs attributes created by acpi_device_setup_files(),
 * mirroring its conditional creation of "eject" and "path".
 */
static void acpi_device_remove_files(struct acpi_device *dev)
{
	acpi_status status;
	acpi_handle temp;

	/*
	 * If device has _EJ0, 'eject' file is created that is used to trigger
	 * hot-removal function from userland.
	 */
	status = acpi_get_handle(dev->handle, "_EJ0", &temp);
	if (ACPI_SUCCESS(status))
		device_remove_file(&dev->dev, &dev_attr_eject);

	device_remove_file(&dev->dev, &dev_attr_modalias);
	device_remove_file(&dev->dev, &dev_attr_hid);
	if (dev->handle)
		device_remove_file(&dev->dev, &dev_attr_path);
}
/* --------------------------------------------------------------------------
ACPI Bus operations
-------------------------------------------------------------------------- */
/*
 * acpi_match_device_ids - check a device against a driver's ID table
 * @device: device whose HID/CID list is searched
 * @ids: table of IDs, terminated by an entry with an empty id string
 *
 * Returns 0 on the first match, -ENODEV if the device is not present,
 * or -ENOENT when no ID matches.
 */
int acpi_match_device_ids(struct acpi_device *device,
			  const struct acpi_device_id *ids)
{
	const struct acpi_device_id *id;
	struct acpi_hardware_id *hwid;

	/*
	 * If the device is not present, it is unnecessary to load device
	 * driver for it.
	 */
	if (!device->status.present)
		return -ENODEV;

	/* Compare every table entry against every hardware ID. */
	for (id = ids; id->id[0]; id++)
		list_for_each_entry(hwid, &device->pnp.ids, list)
			if (!strcmp((char *) id->id, hwid->id))
				return 0;

	return -ENOENT;
}
EXPORT_SYMBOL(acpi_match_device_ids);
/* Free every hardware-ID entry (and its string) on the device's ID list. */
static void acpi_free_ids(struct acpi_device *device)
{
	struct acpi_hardware_id *entry, *next;

	list_for_each_entry_safe(entry, next, &device->pnp.ids, list) {
		kfree(entry->id);
		kfree(entry);
	}
}
/*
 * Release callback for the embedded struct device: frees the hardware-ID
 * list and then the acpi_device itself.  Invoked by the driver core when
 * the device's last reference is dropped.
 */
static void acpi_device_release(struct device *dev)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);

	acpi_free_ids(acpi_dev);
	kfree(acpi_dev);
}
/* Bus suspend callback: delegate to the bound driver's .suspend(), if any. */
static int acpi_device_suspend(struct device *dev, pm_message_t state)
{
	struct acpi_device *adev = to_acpi_device(dev);
	struct acpi_driver *drv = adev->driver;

	if (!drv || !drv->ops.suspend)
		return 0;

	return drv->ops.suspend(adev, state);
}
/* Bus resume callback: delegate to the bound driver's .resume(), if any. */
static int acpi_device_resume(struct device *dev)
{
	struct acpi_device *adev = to_acpi_device(dev);
	struct acpi_driver *drv = adev->driver;

	if (!drv || !drv->ops.resume)
		return 0;

	return drv->ops.resume(adev);
}
/* Bus match callback: nonzero when one of the driver's IDs fits the device. */
static int acpi_bus_match(struct device *dev, struct device_driver *drv)
{
	struct acpi_driver *acpi_drv = to_acpi_driver(drv);

	return acpi_match_device_ids(to_acpi_device(dev), acpi_drv->ids) == 0;
}
/*
 * Bus uevent callback: appends MODALIAS=acpi:ID1:ID2:... so userspace
 * module loading can match ACPI devices by their hardware IDs.
 */
static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	int len;

	if (add_uevent_var(env, "MODALIAS="))
		return -ENOMEM;

	/* Write directly after the "MODALIAS=" prefix, over its NUL. */
	len = create_modalias(acpi_dev, &env->buf[env->buflen - 1],
			      sizeof(env->buf) - env->buflen);
	/*
	 * Propagate create_modalias() errors explicitly; previously the
	 * negative length was promoted to a huge unsigned value in the
	 * comparison below and reported as -ENOMEM.
	 */
	if (len < 0)
		return len;
	if (len >= (sizeof(env->buf) - env->buflen))
		return -ENOMEM;

	env->buflen += len;
	return 0;
}
/*
 * Per-device ACPI notify dispatcher: forwards the event to the bound
 * driver's .notify() op.  Only installed for drivers that set ops.notify
 * (see acpi_device_probe), so device->driver is expected to be valid.
 */
static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *device = data;

	device->driver->ops.notify(device, event);
}
/*
 * Fixed-event trampoline: fixed hardware devices have no namespace
 * handle, so forward a synthetic ACPI_FIXED_HARDWARE_EVENT with a NULL
 * handle to the common notify path.
 */
static acpi_status acpi_device_notify_fixed(void *data)
{
	struct acpi_device *device = data;

	/* Fixed hardware devices have no handles */
	acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
	return AE_OK;
}
/*
 * Install the appropriate notify hookup for @device: fixed-event
 * handlers for the fixed power/sleep buttons, a namespace device-notify
 * handler for everything else.  Returns 0 or -EINVAL on ACPICA failure.
 */
static int acpi_device_install_notify_handler(struct acpi_device *device)
{
	acpi_status status;

	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
		status =
		    acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
						     acpi_device_notify_fixed,
						     device);
	else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
		status =
		    acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
						     acpi_device_notify_fixed,
						     device);
	else
		status = acpi_install_notify_handler(device->handle,
						     ACPI_DEVICE_NOTIFY,
						     acpi_device_notify,
						     device);

	if (ACPI_FAILURE(status))
		return -EINVAL;
	return 0;
}
/*
 * Undo acpi_device_install_notify_handler(), choosing the matching
 * removal call by device type.
 */
static void acpi_device_remove_notify_handler(struct acpi_device *device)
{
	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
		acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
						acpi_device_notify_fixed);
	else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
		acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
						acpi_device_notify_fixed);
	else
		acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
					   acpi_device_notify);
}
static int acpi_bus_driver_init(struct acpi_device *, struct acpi_driver *);
static int acpi_start_single_object(struct acpi_device *);
/*
 * Bus probe callback: bind @dev to its ACPI driver via ops.add(), start
 * it when bus_ops requests it, and install a notify handler when the
 * driver provides ops.notify.  A notify-handler failure rolls the add()
 * back through ops.remove().  Takes a device reference on success.
 */
static int acpi_device_probe(struct device * dev)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
	int ret;

	ret = acpi_bus_driver_init(acpi_dev, acpi_drv);
	if (!ret) {
		if (acpi_dev->bus_ops.acpi_op_start)
			acpi_start_single_object(acpi_dev);

		if (acpi_drv->ops.notify) {
			ret = acpi_device_install_notify_handler(acpi_dev);
			if (ret) {
				/* Roll back the successful add() above. */
				if (acpi_drv->ops.remove)
					acpi_drv->ops.remove(acpi_dev,
						acpi_dev->removal_type);
				return ret;
			}
		}

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Found driver [%s] for device [%s]\n",
				  acpi_drv->name, acpi_dev->pnp.bus_id));
		get_device(dev);
	}

	return ret;
}
/*
 * Bus remove callback: tear down the notify handler, invoke the driver's
 * ops.remove(), clear the binding, and drop the reference taken in
 * acpi_device_probe().
 */
static int acpi_device_remove(struct device * dev)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_driver *acpi_drv = acpi_dev->driver;

	if (acpi_drv) {
		if (acpi_drv->ops.notify)
			acpi_device_remove_notify_handler(acpi_dev);
		if (acpi_drv->ops.remove)
			acpi_drv->ops.remove(acpi_dev, acpi_dev->removal_type);
	}
	acpi_dev->driver = NULL;
	acpi_dev->driver_data = NULL;

	put_device(dev);
	return 0;
}
/* The ACPI bus: matches devices to drivers by hardware/compatible ID. */
struct bus_type acpi_bus_type = {
	.name		= "acpi",
	.suspend	= acpi_device_suspend,
	.resume		= acpi_device_resume,
	.match		= acpi_bus_match,
	.probe		= acpi_device_probe,
	.remove		= acpi_device_remove,
	.uevent		= acpi_device_uevent,
};
/*
 * acpi_device_register - link @device into the ACPI topology and the
 * driver core.  Assigns a "HID:instance" device name, adds the device
 * to its parent's child list and the wakeup list (under
 * acpi_device_lock), registers with the driver core, and creates the
 * sysfs attributes.  Returns 0 or a negative errno.
 */
static int acpi_device_register(struct acpi_device *device)
{
	int result;
	struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
	int found = 0;

	/*
	 * Linkage
	 * -------
	 * Link this device to its parent and siblings.
	 */
	INIT_LIST_HEAD(&device->children);
	INIT_LIST_HEAD(&device->node);
	INIT_LIST_HEAD(&device->wakeup_list);

	/* Allocate up front so no allocation happens under the mutex. */
	new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
	if (!new_bus_id) {
		printk(KERN_ERR PREFIX "Memory allocation error\n");
		return -ENOMEM;
	}

	mutex_lock(&acpi_device_lock);
	/*
	 * Find suitable bus_id and instance number in acpi_bus_id_list
	 * If failed, create one and link it into acpi_bus_id_list
	 */
	list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
		if (!strcmp(acpi_device_bus_id->bus_id,
			    acpi_device_hid(device))) {
			/* Same HID already registered: bump the instance. */
			acpi_device_bus_id->instance_no++;
			found = 1;
			kfree(new_bus_id);
			break;
		}
	}
	if (!found) {
		acpi_device_bus_id = new_bus_id;
		strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device));
		acpi_device_bus_id->instance_no = 0;
		list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
	}
	/* Names come out as e.g. "PNP0C0C:00", "PNP0C0C:01", ... */
	dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);

	if (device->parent)
		list_add_tail(&device->node, &device->parent->children);

	if (device->wakeup.flags.valid)
		list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list);
	mutex_unlock(&acpi_device_lock);

	if (device->parent)
		device->dev.parent = &device->parent->dev;
	device->dev.bus = &acpi_bus_type;
	device->dev.release = &acpi_device_release;
	result = device_register(&device->dev);
	if (result) {
		dev_err(&device->dev, "Error registering device\n");
		goto end;
	}

	/* sysfs failures are logged but do not fail registration. */
	result = acpi_device_setup_files(device);
	if (result)
		printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n",
		       dev_name(&device->dev));

	device->removal_type = ACPI_BUS_REMOVAL_NORMAL;
	return 0;
end:
	/* Undo the list insertions performed above. */
	mutex_lock(&acpi_device_lock);
	if (device->parent)
		list_del(&device->node);
	list_del(&device->wakeup_list);
	mutex_unlock(&acpi_device_lock);
	return result;
}
/*
 * Reverse of acpi_device_register(): unlink from parent/wakeup lists,
 * detach the handle->device association, remove sysfs files, and
 * unregister from the driver core.  @type is currently unused.
 */
static void acpi_device_unregister(struct acpi_device *device, int type)
{
	mutex_lock(&acpi_device_lock);
	if (device->parent)
		list_del(&device->node);

	list_del(&device->wakeup_list);
	mutex_unlock(&acpi_device_lock);

	acpi_detach_data(device->handle, acpi_bus_data_handler);

	acpi_device_remove_files(device);
	device_unregister(&device->dev);
}
/* --------------------------------------------------------------------------
Driver Management
-------------------------------------------------------------------------- */
/**
* acpi_bus_driver_init - add a device to a driver
* @device: the device to add and initialize
* @driver: driver for the device
*
* Used to initialize a device via its device driver. Called whenever a
* driver is bound to a device. Invokes the driver's add() ops.
*/
static int
acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
{
	int ret;

	if (!device || !driver)
		return -EINVAL;

	if (!driver->ops.add)
		return -ENOSYS;

	ret = driver->ops.add(device);
	if (ret) {
		/* A failed add() leaves the device unbound. */
		device->driver = NULL;
		device->driver_data = NULL;
		return ret;
	}

	device->driver = driver;

	/*
	 * TBD - Configuration Management: Assign resources to device based
	 * upon possible configuration and currently allocated resources.
	 */

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Driver successfully bound to device\n"));
	return 0;
}
/*
 * Invoke the bound driver's .start() op, if any; a failed start is
 * undone via the driver's .remove() op.
 */
static int acpi_start_single_object(struct acpi_device *device)
{
	struct acpi_driver *drv = device->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->ops.start) {
		ret = drv->ops.start(device);
		if (ret && drv->ops.remove)
			drv->ops.remove(device, ACPI_BUS_REMOVAL_NORMAL);
	}

	return ret;
}
/**
* acpi_bus_register_driver - register a driver with the ACPI bus
* @driver: driver being registered
*
* Registers a driver with the ACPI bus. Searches the namespace for all
* devices that match the driver's criteria and binds. Returns zero for
* success or a negative error status for failure.
*/
int acpi_bus_register_driver(struct acpi_driver *driver)
{
	if (acpi_disabled)
		return -ENODEV;

	/* Mirror the ACPI driver fields into the generic driver core. */
	driver->drv.name = driver->name;
	driver->drv.bus = &acpi_bus_type;
	driver->drv.owner = driver->owner;

	return driver_register(&driver->drv);
}
EXPORT_SYMBOL(acpi_bus_register_driver);
/**
 * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
 * @driver: driver to unregister
 *
 * Unregisters a driver with the ACPI bus.  Searches the namespace for all
 * devices that match the driver's criteria and unbinds.
 */
void acpi_bus_unregister_driver(struct acpi_driver *driver)
{
	driver_unregister(&driver->drv);
}
EXPORT_SYMBOL(acpi_bus_unregister_driver);
/* --------------------------------------------------------------------------
Device Enumeration
-------------------------------------------------------------------------- */
/*
 * Walk up the ACPI namespace from @handle until a registered acpi_device
 * is found.  Fixed hardware devices (NULL handle) resolve to acpi_root;
 * AE_NULL_ENTRY while ascending means there is no parent at all, and any
 * other ascent failure falls back to acpi_root.
 */
static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
{
	acpi_status status;
	int ret;
	struct acpi_device *device;

	/*
	 * Fixed hardware devices do not appear in the namespace and do not
	 * have handles, but we fabricate acpi_devices for them, so we have
	 * to deal with them specially.
	 */
	if (handle == NULL)
		return acpi_root;

	do {
		status = acpi_get_parent(handle, &handle);
		if (status == AE_NULL_ENTRY)
			return NULL;
		if (ACPI_FAILURE(status))
			return acpi_root;

		ret = acpi_bus_get_device(handle, &device);
		if (ret == 0)
			return device;
	} while (1);
}
/*
 * acpi_bus_get_ejd - resolve the _EJD (ejection dependency) of @handle
 * @handle: object whose _EJD is evaluated
 * @ejd: out parameter receiving the handle the _EJD path names
 *
 * Returns the status of the _EJD lookup/evaluation chain.
 */
acpi_status
acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
{
	acpi_status status;
	acpi_handle tmp;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *obj;

	status = acpi_get_handle(handle, "_EJD", &tmp);
	if (ACPI_FAILURE(status))
		return status;

	status = acpi_evaluate_object(handle, "_EJD", NULL, &buffer);
	if (ACPI_SUCCESS(status)) {
		obj = buffer.pointer;
		/* _EJD returns a namestring; look it up from the root. */
		status = acpi_get_handle(ACPI_ROOT_OBJECT, obj->string.pointer,
					 ejd);
		kfree(buffer.pointer);
	}
	return status;
}
EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);
/*
 * Callback ACPICA invokes when data attached via acpi_attach_data() is
 * detached.  Intentionally empty for now.
 */
void acpi_bus_data_handler(acpi_handle handle, void *context)
{

	/* TBD */

	return;
}
/* Performance management is not implemented; report unknown state. */
static int acpi_bus_get_perf_flags(struct acpi_device *device)
{
	device->performance.state = ACPI_STATE_UNKNOWN;
	return 0;
}
/*
 * Parse a _PRW return package into @device->wakeup.
 *
 * Package layout handled here:
 *   element 0: either a sub-package {GPE block device ref, GPE number}
 *              or a plain integer GPE number,
 *   element 1: deepest sleep state the device can wake the system from,
 *   elements 2..N: references to required power resources (at most
 *              ACPI_MAX_HANDLES of them).
 */
static acpi_status
acpi_bus_extract_wakeup_device_power_package(struct acpi_device *device,
					     union acpi_object *package)
{
	int i = 0;
	union acpi_object *element = NULL;

	if (!device || !package || (package->package.count < 2))
		return AE_BAD_PARAMETER;

	element = &(package->package.elements[0]);
	if (!element)
		return AE_BAD_PARAMETER;

	if (element->type == ACPI_TYPE_PACKAGE) {
		/* {reference to GPE block device, integer GPE number} */
		if ((element->package.count < 2) ||
		    (element->package.elements[0].type !=
		     ACPI_TYPE_LOCAL_REFERENCE)
		    || (element->package.elements[1].type != ACPI_TYPE_INTEGER))
			return AE_BAD_DATA;
		device->wakeup.gpe_device =
		    element->package.elements[0].reference.handle;
		device->wakeup.gpe_number =
		    (u32) element->package.elements[1].integer.value;
	} else if (element->type == ACPI_TYPE_INTEGER) {
		device->wakeup.gpe_number = element->integer.value;
	} else
		return AE_BAD_DATA;

	element = &(package->package.elements[1]);
	if (element->type != ACPI_TYPE_INTEGER) {
		return AE_BAD_DATA;
	}
	device->wakeup.sleep_state = element->integer.value;

	/* The fixed-size handle array bounds how many resources fit. */
	if ((package->package.count - 2) > ACPI_MAX_HANDLES) {
		return AE_NO_MEMORY;
	}
	device->wakeup.resources.count = package->package.count - 2;
	for (i = 0; i < device->wakeup.resources.count; i++) {
		element = &(package->package.elements[i + 2]);
		if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
			return AE_BAD_DATA;

		device->wakeup.resources.handles[i] = element->reference.handle;
	}

	return AE_OK;
}
/*
 * acpi_bus_get_wakeup_device_flags - initialize wakeup state from _PRW
 * @device: device whose wakeup capabilities are being probed
 *
 * Evaluates _PRW to learn the wake GPE and the deepest wake-capable
 * sleep state, disables the device's wake ability via _DSW/_PSW, and
 * marks power/lid/sleep buttons as runtime-wake sources.  Always
 * returns 0; on failure the wake_capable flag is simply cleared.
 */
static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
{
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *package = NULL;
	int psw_error;

	struct acpi_device_id button_device_ids[] = {
		{"PNP0C0D", 0},		/* lid */
		{"PNP0C0C", 0},		/* power button */
		{"PNP0C0E", 0},		/* sleep button */
		{"", 0},
	};

	/* _PRW */
	status = acpi_evaluate_object(device->handle, "_PRW", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW"));
		goto end;
	}

	package = (union acpi_object *)buffer.pointer;
	status = acpi_bus_extract_wakeup_device_power_package(device, package);
	/*
	 * Free the _PRW result unconditionally; it was previously leaked
	 * when the package failed to parse.
	 */
	kfree(buffer.pointer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package"));
		goto end;
	}

	device->wakeup.flags.valid = 1;
	device->wakeup.prepare_count = 0;
	/* Call _PSW/_DSW object to disable its ability to wake the sleeping
	 * system for the ACPI device with the _PRW object.
	 * The _PSW object is depreciated in ACPI 3.0 and is replaced by _DSW.
	 * So it is necessary to call _DSW object first. Only when it is not
	 * present will the _PSW object used.
	 */
	psw_error = acpi_device_sleep_wake(device, 0, 0, 0);
	if (psw_error)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				"error in _DSW or _PSW evaluation\n"));

	/* Power button, Lid switch always enable wakeup */
	if (!acpi_match_device_ids(device, button_device_ids))
		device->wakeup.flags.run_wake = 1;

end:
	if (ACPI_FAILURE(status))
		device->flags.wake_capable = 0;
	return 0;
}
/*
 * acpi_bus_get_power_flags - discover the device's D-state support by
 * probing _PSC/_IRC and the per-state _PRx/_PSx objects, then read the
 * current power state.  Always returns 0.
 */
static int acpi_bus_get_power_flags(struct acpi_device *device)
{
	acpi_status status = 0;
	acpi_handle handle = NULL;
	u32 i = 0;


	/*
	 * Power Management Flags
	 */
	status = acpi_get_handle(device->handle, "_PSC", &handle);
	if (ACPI_SUCCESS(status))
		device->power.flags.explicit_get = 1;
	status = acpi_get_handle(device->handle, "_IRC", &handle);
	if (ACPI_SUCCESS(status))
		device->power.flags.inrush_current = 1;

	/*
	 * Enumerate supported power management states
	 */
	for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3; i++) {
		struct acpi_device_power_state *ps = &device->power.states[i];
		char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' };

		/* Evaluate "_PRx" to se if power resources are referenced */
		acpi_evaluate_reference(device->handle, object_name, NULL,
					&ps->resources);
		if (ps->resources.count) {
			device->power.flags.power_resources = 1;
			ps->flags.valid = 1;
		}

		/* Evaluate "_PSx" to see if we can do explicit sets */
		object_name[2] = 'S';
		status = acpi_get_handle(device->handle, object_name, &handle);
		if (ACPI_SUCCESS(status)) {
			ps->flags.explicit_set = 1;
			ps->flags.valid = 1;
		}

		/* State is valid if we have some power control */
		if (ps->resources.count || ps->flags.explicit_set)
			ps->flags.valid = 1;

		ps->power = -1;	/* Unknown - driver assigned */
		ps->latency = -1;	/* Unknown - driver assigned */
	}

	/* Set defaults for D0 and D3 states (always valid) */
	device->power.states[ACPI_STATE_D0].flags.valid = 1;
	device->power.states[ACPI_STATE_D0].power = 100;
	device->power.states[ACPI_STATE_D3].flags.valid = 1;
	device->power.states[ACPI_STATE_D3].power = 0;

	/* TBD: System wake support and resource requirements. */

	device->power.state = ACPI_STATE_UNKNOWN;
	acpi_bus_get_power(device->handle, &(device->power.state));

	return 0;
}
/*
 * acpi_bus_get_flags - populate @device->flags by testing which ACPI
 * control methods exist on the handle (presence only; nothing is
 * evaluated here).  Always returns 0.
 */
static int acpi_bus_get_flags(struct acpi_device *device)
{
	acpi_status status = AE_OK;
	acpi_handle temp = NULL;


	/* Presence of _STA indicates 'dynamic_status' */
	status = acpi_get_handle(device->handle, "_STA", &temp);
	if (ACPI_SUCCESS(status))
		device->flags.dynamic_status = 1;

	/* Presence of _RMV indicates 'removable' */
	status = acpi_get_handle(device->handle, "_RMV", &temp);
	if (ACPI_SUCCESS(status))
		device->flags.removable = 1;

	/* Presence of _EJD|_EJ0 indicates 'ejectable' */
	status = acpi_get_handle(device->handle, "_EJD", &temp);
	if (ACPI_SUCCESS(status))
		device->flags.ejectable = 1;
	else {
		status = acpi_get_handle(device->handle, "_EJ0", &temp);
		if (ACPI_SUCCESS(status))
			device->flags.ejectable = 1;
	}

	/* Presence of _LCK indicates 'lockable' */
	status = acpi_get_handle(device->handle, "_LCK", &temp);
	if (ACPI_SUCCESS(status))
		device->flags.lockable = 1;

	/* Presence of _PS0|_PR0 indicates 'power manageable' */
	status = acpi_get_handle(device->handle, "_PS0", &temp);
	if (ACPI_FAILURE(status))
		status = acpi_get_handle(device->handle, "_PR0", &temp);
	if (ACPI_SUCCESS(status))
		device->flags.power_manageable = 1;

	/* Presence of _PRW indicates wake capable */
	status = acpi_get_handle(device->handle, "_PRW", &temp);
	if (ACPI_SUCCESS(status))
		device->flags.wake_capable = 1;

	/* TBD: Performance management */

	return 0;
}
/*
 * Fill in @device->pnp.bus_id: "ACPI" for the root, "PWRF"/"SLPF" for
 * the fixed buttons, otherwise the 4-character namespace segment name
 * with trailing '_' padding stripped.
 */
static void acpi_device_get_busid(struct acpi_device *device)
{
	char bus_id[5] = { '?', 0 };
	struct acpi_buffer buffer = { sizeof(bus_id), bus_id };
	int i = 0;

	/*
	 * Bus ID
	 * ------
	 * The device's Bus ID is simply the object name.
	 * TBD: Shouldn't this value be unique (within the ACPI namespace)?
	 */
	if (ACPI_IS_ROOT_DEVICE(device)) {
		strcpy(device->pnp.bus_id, "ACPI");
		return;
	}

	switch (device->device_type) {
	case ACPI_BUS_TYPE_POWER_BUTTON:
		strcpy(device->pnp.bus_id, "PWRF");
		break;
	case ACPI_BUS_TYPE_SLEEP_BUTTON:
		strcpy(device->pnp.bus_id, "SLPF");
		break;
	default:
		acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
		/* Clean up trailing underscores (if any) */
		for (i = 3; i > 1; i--) {
			if (bus_id[i] == '_')
				bus_id[i] = '\0';
			else
				break;
		}
		strcpy(device->pnp.bus_id, bus_id);
		break;
	}
}
/*
* acpi_bay_match - see if a device is an ejectable driver bay
*
* If an acpi object is ejectable and has one of the ACPI ATA methods defined,
* then we can safely call it an ejectable drive bay
*/
static int acpi_bay_match(struct acpi_device *device){
	acpi_status status;
	acpi_handle handle;
	acpi_handle tmp;
	acpi_handle phandle;

	handle = device->handle;

	/* Must be ejectable at all to count as a bay. */
	status = acpi_get_handle(handle, "_EJ0", &tmp);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	/* Any ATA method directly on the object makes it a bay... */
	if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
		(ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
		(ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
		(ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
		return 0;

	if (acpi_get_parent(handle, &phandle))
		return -ENODEV;

	/* ...as does an ATA method on its parent (drive under a port). */
	if ((ACPI_SUCCESS(acpi_get_handle(phandle, "_GTF", &tmp))) ||
		(ACPI_SUCCESS(acpi_get_handle(phandle, "_GTM", &tmp))) ||
		(ACPI_SUCCESS(acpi_get_handle(phandle, "_STM", &tmp))) ||
		(ACPI_SUCCESS(acpi_get_handle(phandle, "_SDD", &tmp))))
		return 0;

	return -ENODEV;
}
/*
* acpi_dock_match - see if a device has a _DCK method
*/
static int acpi_dock_match(struct acpi_device *device)
{
	acpi_handle dummy;

	/* The status of the _DCK lookup doubles as the match result. */
	return acpi_get_handle(device->handle, "_DCK", &dummy);
}
/*
 * Return the device's primary hardware ID.  The ID list is never empty
 * in practice: enumeration synthesizes a "device" ID when no _HID/_CID
 * exists (see acpi_device_set_id).
 */
char *acpi_device_hid(struct acpi_device *device)
{
	struct acpi_hardware_id *first;

	first = list_first_entry(&device->pnp.ids,
				 struct acpi_hardware_id, list);
	return first->id;
}
EXPORT_SYMBOL(acpi_device_hid);
/*
 * Append a copy of @dev_id to @device's hardware-ID list.  Allocation
 * failures are silently ignored -- the ID is simply not recorded.
 */
static void acpi_add_id(struct acpi_device *device, const char *dev_id)
{
	struct acpi_hardware_id *id;

	id = kmalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return;

	id->id = kmalloc(strlen(dev_id) + 1, GFP_KERNEL);
	if (!id->id) {
		kfree(id);
		return;
	}

	strcpy(id->id, dev_id);
	list_add_tail(&id->list, &device->pnp.ids);
}
/*
 * Populate @device's hardware/compatible ID list according to its type:
 * real _HID/_CID values (plus synthetic video/bay/dock IDs) for
 * namespace devices, fixed well-known HIDs for the other bus types, and
 * a generic "device" ID as a last resort so the list is never empty.
 */
static void acpi_device_set_id(struct acpi_device *device)
{
	acpi_status status;
	struct acpi_device_info *info;
	struct acpica_device_id_list *cid_list;
	int i;

	switch (device->device_type) {
	case ACPI_BUS_TYPE_DEVICE:
		if (ACPI_IS_ROOT_DEVICE(device)) {
			acpi_add_id(device, ACPI_SYSTEM_HID);
			break;
		} else if (ACPI_IS_ROOT_DEVICE(device->parent)) {
			/* \_SB_, the only root-level namespace device */
			acpi_add_id(device, ACPI_BUS_HID);
			strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
			strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
			break;
		}

		status = acpi_get_object_info(device->handle, &info);
		if (ACPI_FAILURE(status)) {
			printk(KERN_ERR PREFIX "%s: Error reading device info\n", __func__);
			return;
		}

		if (info->valid & ACPI_VALID_HID)
			acpi_add_id(device, info->hardware_id.string);
		if (info->valid & ACPI_VALID_CID) {
			cid_list = &info->compatible_id_list;
			for (i = 0; i < cid_list->count; i++)
				acpi_add_id(device, cid_list->ids[i].string);
		}
		if (info->valid & ACPI_VALID_ADR) {
			device->pnp.bus_address = info->address;
			device->flags.bus_address = 1;
		}

		/* info was allocated by ACPICA; we own it now. */
		kfree(info);

		/*
		 * Some devices don't reliably have _HIDs & _CIDs, so add
		 * synthetic HIDs to make sure drivers can find them.
		 */
		if (acpi_is_video_device(device))
			acpi_add_id(device, ACPI_VIDEO_HID);
		else if (ACPI_SUCCESS(acpi_bay_match(device)))
			acpi_add_id(device, ACPI_BAY_HID);
		else if (ACPI_SUCCESS(acpi_dock_match(device)))
			acpi_add_id(device, ACPI_DOCK_HID);

		break;
	case ACPI_BUS_TYPE_POWER:
		acpi_add_id(device, ACPI_POWER_HID);
		break;
	case ACPI_BUS_TYPE_PROCESSOR:
		acpi_add_id(device, ACPI_PROCESSOR_OBJECT_HID);
		break;
	case ACPI_BUS_TYPE_THERMAL:
		acpi_add_id(device, ACPI_THERMAL_HID);
		break;
	case ACPI_BUS_TYPE_POWER_BUTTON:
		acpi_add_id(device, ACPI_BUTTON_HID_POWERF);
		break;
	case ACPI_BUS_TYPE_SLEEP_BUTTON:
		acpi_add_id(device, ACPI_BUTTON_HID_SLEEPF);
		break;
	}

	/*
	 * We build acpi_devices for some objects that don't have _HID or _CID,
	 * e.g., PCI bridges and slots. Drivers can't bind to these objects,
	 * but we do use them indirectly by traversing the acpi_device tree.
	 * This generic ID isn't useful for driver binding, but it provides
	 * the useful property that "every acpi_device has an ID."
	 */
	if (list_empty(&device->pnp.ids))
		acpi_add_id(device, "device");
}
/*
 * Attach this 'struct acpi_device' to its ACPI object so handle->device
 * resolution is cheap.  Fixed hardware devices have no handle and are
 * skipped.  Returns 0 on success (or skip), -ENODEV otherwise.
 */
static int acpi_device_set_context(struct acpi_device *device)
{
	acpi_status status;

	if (!device->handle)
		return 0;

	status = acpi_attach_data(device->handle,
				  acpi_bus_data_handler, device);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Error attaching device data\n");
		return -ENODEV;
	}

	return 0;
}
/*
 * acpi_bus_remove - detach the driver from @dev and, when @rmdevice is
 * set, unbind any _ADR-based child binding and unregister the device.
 */
static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
{
	if (!dev)
		return -EINVAL;

	dev->removal_type = ACPI_BUS_REMOVAL_EJECT;
	device_release_driver(&dev->dev);

	if (!rmdevice)
		return 0;

	/*
	 * unbind _ADR-Based Devices when hot removal
	 */
	if (dev->flags.bus_address) {
		if ((dev->parent) && (dev->parent->ops.unbind))
			dev->parent->ops.unbind(dev);
	}
	acpi_device_unregister(dev, ACPI_BUS_REMOVAL_EJECT);

	return 0;
}
/*
 * acpi_add_single_object - allocate and enumerate one acpi_device
 * @child: out parameter receiving the new device on success
 * @handle: namespace handle (NULL for fixed hardware devices)
 * @type: ACPI_BUS_TYPE_* classification
 * @sta: initial _STA status bits
 * @ops: enumeration ops copied into the device
 *
 * Discovers flags, IDs, power/wakeup/performance capabilities, attaches
 * the handle context, and registers with the driver core.  On failure
 * the partially built device is released.
 */
static int acpi_add_single_object(struct acpi_device **child,
				  acpi_handle handle, int type,
				  unsigned long long sta,
				  struct acpi_bus_ops *ops)
{
	int result;
	struct acpi_device *device;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };

	device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL);
	if (!device) {
		printk(KERN_ERR PREFIX "Memory allocation error\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&device->pnp.ids);
	device->device_type = type;
	device->handle = handle;
	device->parent = acpi_bus_get_parent(handle);
	device->bus_ops = *ops; /* workround for not call .start */
	STRUCT_TO_INT(device->status) = sta;

	acpi_device_get_busid(device);

	/*
	 * Flags
	 * -----
	 * Note that we only look for object handles -- cannot evaluate objects
	 * until we know the device is present and properly initialized.
	 */
	result = acpi_bus_get_flags(device);
	if (result)
		goto end;

	/*
	 * Initialize Device
	 * -----------------
	 * TBD: Synch with Core's enumeration/initialization process.
	 */
	acpi_device_set_id(device);

	/*
	 * Power Management
	 * ----------------
	 */
	if (device->flags.power_manageable) {
		result = acpi_bus_get_power_flags(device);
		if (result)
			goto end;
	}

	/*
	 * Wakeup device management
	 *-----------------------
	 */
	if (device->flags.wake_capable) {
		result = acpi_bus_get_wakeup_device_flags(device);
		if (result)
			goto end;
	}

	/*
	 * Performance Management
	 * ----------------------
	 */
	if (device->flags.performance_manageable) {
		result = acpi_bus_get_perf_flags(device);
		if (result)
			goto end;
	}

	if ((result = acpi_device_set_context(device)))
		goto end;

	result = acpi_device_register(device);

	/*
	 * Bind _ADR-Based Devices when hot add
	 */
	if (device->flags.bus_address) {
		if (device->parent && device->parent->ops.bind)
			device->parent->ops.bind(device);
	}

end:
	if (!result) {
		acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"Adding %s [%s] parent %s\n", dev_name(&device->dev),
			 (char *) buffer.pointer,
			 device->parent ? dev_name(&device->parent->dev) :
					  "(null)"));
		kfree(buffer.pointer);
		*child = device;
	} else
		/* Frees the ID list and the device itself. */
		acpi_device_release(&device->dev);

	return result;
}
#define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \
ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING)
/*
 * Classify a namespace object into an ACPI_BUS_TYPE_* value and obtain
 * its status: real _STA for devices and processors, ACPI_STA_DEFAULT
 * for thermal zones and power resources (which have no _STA).  Returns
 * 0 or -ENODEV for objects we do not enumerate.
 */
static int acpi_bus_type_and_status(acpi_handle handle, int *type,
				    unsigned long long *sta)
{
	acpi_status status;
	acpi_object_type acpi_type;

	status = acpi_get_type(handle, &acpi_type);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	switch (acpi_type) {
	case ACPI_TYPE_ANY:		/* for ACPI_ROOT_OBJECT */
	case ACPI_TYPE_DEVICE:
		*type = ACPI_BUS_TYPE_DEVICE;
		status = acpi_bus_get_status_handle(handle, sta);
		if (ACPI_FAILURE(status))
			return -ENODEV;
		break;
	case ACPI_TYPE_PROCESSOR:
		*type = ACPI_BUS_TYPE_PROCESSOR;
		status = acpi_bus_get_status_handle(handle, sta);
		if (ACPI_FAILURE(status))
			return -ENODEV;
		break;
	case ACPI_TYPE_THERMAL:
		*type = ACPI_BUS_TYPE_THERMAL;
		*sta = ACPI_STA_DEFAULT;
		break;
	case ACPI_TYPE_POWER:
		*type = ACPI_BUS_TYPE_POWER;
		*sta = ACPI_STA_DEFAULT;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}
/*
 * acpi_walk_namespace() callback: add and/or start the acpi_device for
 * @handle according to the acpi_bus_ops in @context.  Returns
 * AE_CTRL_DEPTH to prune the walk below absent or failed devices;
 * *return_value is set to the first device encountered.
 */
static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl,
				      void *context, void **return_value)
{
	struct acpi_bus_ops *ops = context;
	int type;
	unsigned long long sta;
	struct acpi_device *device;
	acpi_status status;
	int result;

	result = acpi_bus_type_and_status(handle, &type, &sta);
	if (result)
		return AE_OK;

	if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
	    !(sta & ACPI_STA_DEVICE_FUNCTIONING))
		return AE_CTRL_DEPTH;

	/*
	 * We may already have an acpi_device from a previous enumeration.  If
	 * so, we needn't add it again, but we may still have to start it.
	 */
	device = NULL;
	acpi_bus_get_device(handle, &device);
	if (ops->acpi_op_add && !device)
		acpi_add_single_object(&device, handle, type, sta, ops);

	if (!device)
		return AE_CTRL_DEPTH;

	/* Start-only pass: add passes start devices via acpi_device_probe. */
	if (ops->acpi_op_start && !(ops->acpi_op_add)) {
		status = acpi_start_single_object(device);
		if (ACPI_FAILURE(status))
			return AE_CTRL_DEPTH;
	}

	if (!*return_value)
		*return_value = device;
	return AE_OK;
}
/*
 * acpi_bus_scan - enumerate @handle itself and then its whole subtree
 * with acpi_bus_check_add().  @child, when non-NULL, receives the
 * device created for @handle.  Always returns 0.
 */
static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
			 struct acpi_device **child)
{
	acpi_status status;
	void *device = NULL;

	status = acpi_bus_check_add(handle, 0, ops, &device);
	if (ACPI_SUCCESS(status))
		acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
				    acpi_bus_check_add, ops, &device);

	if (child)
		*child = device;
	return 0;
}
/*
 * acpi_bus_add - enumerate (add) all devices at and below @handle.
 * @parent and @type are unused; @child receives the device for @handle.
 */
int
acpi_bus_add(struct acpi_device **child,
	     struct acpi_device *parent, acpi_handle handle, int type)
{
	struct acpi_bus_ops ops = { .acpi_op_add = 1 };

	acpi_bus_scan(handle, &ops, child);
	return 0;
}
EXPORT_SYMBOL(acpi_bus_add);
int acpi_bus_start(struct acpi_device *device)
{
struct acpi_bus_ops ops;
if (!device)
return -EINVAL;
memset(&ops, 0, sizeof(ops));
ops.acpi_op_start = 1;
acpi_bus_scan(device->handle, &ops, NULL);
return 0;
}
EXPORT_SYMBOL(acpi_bus_start);
/*
 * acpi_bus_trim - remove @start's subtree depth-first, children before
 * parents.  The iterative walk descends whenever chandle has a
 * registered acpi_device and removes each device as its scope is
 * exhausted; @rmdevice controls whether @start itself (level 0) is also
 * unregistered.  Returns 0 or the first acpi_bus_remove() error.
 */
int acpi_bus_trim(struct acpi_device *start, int rmdevice)
{
	acpi_status status;
	struct acpi_device *parent, *child;
	acpi_handle phandle, chandle;
	acpi_object_type type;
	u32 level = 1;
	int err = 0;

	parent = start;
	phandle = start->handle;
	child = chandle = NULL;

	while ((level > 0) && parent && (!err)) {
		status = acpi_get_next_object(ACPI_TYPE_ANY, phandle,
					      chandle, &chandle);

		/*
		 * If this scope is exhausted then move our way back up.
		 */
		if (ACPI_FAILURE(status)) {
			level--;
			chandle = phandle;
			acpi_get_parent(phandle, &phandle);
			child = parent;
			parent = parent->parent;

			/* Only the top-level device honors @rmdevice. */
			if (level == 0)
				err = acpi_bus_remove(child, rmdevice);
			else
				err = acpi_bus_remove(child, 1);
			continue;
		}

		status = acpi_get_type(chandle, &type);
		if (ACPI_FAILURE(status)) {
			continue;
		}
		/*
		 * If there is a device corresponding to chandle then
		 * parse it (depth-first).
		 */
		if (acpi_bus_get_device(chandle, &child) == 0) {
			level++;
			phandle = chandle;
			chandle = NULL;
			parent = child;
		}
		continue;
	}
	return err;
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
/*
 * Fabricate acpi_devices for the fixed-feature power and sleep buttons
 * when the FADT says they are not control-method based.  Returns the
 * result of the last add attempted.
 */
static int acpi_bus_scan_fixed(void)
{
	int result = 0;
	struct acpi_device *device = NULL;
	struct acpi_bus_ops ops;

	memset(&ops, 0, sizeof(ops));
	ops.acpi_op_add = 1;
	ops.acpi_op_start = 1;

	/*
	 * Enumerate all fixed-feature devices.
	 */
	if ((acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON) == 0) {
		result = acpi_add_single_object(&device, NULL,
						ACPI_BUS_TYPE_POWER_BUTTON,
						ACPI_STA_DEFAULT,
						&ops);
	}

	if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
		result = acpi_add_single_object(&device, NULL,
						ACPI_BUS_TYPE_SLEEP_BUTTON,
						ACPI_STA_DEFAULT,
						&ops);
	}

	return result;
}
/*
 * acpi_scan_init - boot-time entry point: register the ACPI bus type,
 * enumerate the whole namespace from the root, then the fixed-feature
 * devices.  On enumeration failure the root device is torn down again.
 */
int __init acpi_scan_init(void)
{
	int result;
	struct acpi_bus_ops ops;

	memset(&ops, 0, sizeof(ops));
	ops.acpi_op_add = 1;
	ops.acpi_op_start = 1;

	result = bus_register(&acpi_bus_type);
	if (result) {
		/* We don't want to quit even if we failed to add suspend/resume */
		printk(KERN_ERR PREFIX "Could not register bus type\n");
	}

	/*
	 * Enumerate devices in the ACPI namespace.
	 */
	result = acpi_bus_scan(ACPI_ROOT_OBJECT, &ops, &acpi_root);

	if (!result)
		result = acpi_bus_scan_fixed();

	if (result)
		acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);

	return result;
}
| gpl-2.0 |
Chibaibuki/TCP-IP-Timer-For-Linux-Kernel | drivers/usb/phy/phy-mv-usb.c | 2139 | 21184 | /*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
* Author: Chao Xie <chao.xie@marvell.com>
* Neil Zhang <zhangwm@marvell.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/proc_fs.h>
#include <linux/clk.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/otg.h>
#include <linux/usb/gadget.h>
#include <linux/usb/hcd.h>
#include <linux/platform_data/mv_usb.h>
#include "phy-mv-usb.h"
#define DRIVER_DESC "Marvell USB OTG transceiver driver"
#define DRIVER_VERSION "Jan 20, 2010"
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
static const char driver_name[] = "mv-otg";
/*
 * Human-readable names for OTG states, used in log messages.
 * NOTE(review): indexed directly by phy->state, so the order must match
 * the usb_otg_state enum in <linux/usb/otg.h> — verify if that enum
 * changes.
 */
static char *state_string[] = {
    "undefined",
    "b_idle",
    "b_srp_init",
    "b_peripheral",
    "b_wait_acon",
    "b_host",
    "a_idle",
    "a_wait_vrise",
    "a_wait_bcon",
    "a_host",
    "a_suspend",
    "a_peripheral",
    "a_wait_vfall",
    "a_vbus_err"
};
/*
 * Drive VBUS on or off through the board-specific callback.
 * Returns -ENODEV when the platform provides no VBUS control.
 */
static int mv_otg_set_vbus(struct usb_otg *otg, bool on)
{
    struct mv_otg *otg_dev = container_of(otg->phy, struct mv_otg, phy);

    if (!otg_dev->pdata->set_vbus)
        return -ENODEV;

    return otg_dev->pdata->set_vbus(on);
}
/* Record the host controller bus that will be used in host mode. */
static int mv_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
{
    otg->host = host;

    return 0;
}
/* Record the gadget driver that will be used in peripheral mode. */
static int mv_otg_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget)
{
    otg->gadget = gadget;

    return 0;
}
/*
 * Schedule one pass of the OTG state machine after @delay jiffies.
 * A missing workqueue (probe not finished / already torn down) is a no-op.
 */
static void mv_otg_run_state_machine(struct mv_otg *mvotg,
                                     unsigned long delay)
{
    dev_dbg(&mvotg->pdev->dev, "transceiver is updated\n");
    if (mvotg->qwork)
        queue_delayed_work(mvotg->qwork, &mvotg->work, delay);
}
/*
 * Timer callback: the B-device failed to connect within T_A_WAIT_BCON.
 * Marks the timeout and kicks the state machine.  spin_trylock is used
 * because this runs in timer (softirq) context and must not sleep; if
 * the lock is held the state machine is already being run elsewhere.
 */
static void mv_otg_timer_await_bcon(unsigned long data)
{
    struct mv_otg *mvotg = (struct mv_otg *) data;

    mvotg->otg_ctrl.a_wait_bcon_timeout = 1;

    dev_info(&mvotg->pdev->dev, "B Device No Response!\n");

    if (spin_trylock(&mvotg->wq_lock)) {
        mv_otg_run_state_machine(mvotg, 0);
        spin_unlock(&mvotg->wq_lock);
    }
}
/*
 * Cancel the OTG timer @id if it is currently armed.
 * Returns 0, or -EINVAL for an out-of-range timer id.
 */
static int mv_otg_cancel_timer(struct mv_otg *mvotg, unsigned int id)
{
    struct timer_list *t;

    if (id >= OTG_TIMER_NUM)
        return -EINVAL;

    t = &mvotg->otg_ctrl.timer[id];
    if (timer_pending(t))
        del_timer(t);

    return 0;
}
/*
 * Arm the OTG timer @id to fire @callback after @interval jiffies.
 * The mv_otg pointer is passed to the callback via timer->data.
 *
 * Returns 0, -EINVAL for a bad id, or -EBUSY if the timer is already
 * pending (each timer slot may only run one countdown at a time).
 */
static int mv_otg_set_timer(struct mv_otg *mvotg, unsigned int id,
                            unsigned long interval,
                            void (*callback) (unsigned long))
{
    struct timer_list *timer;

    if (id >= OTG_TIMER_NUM)
        return -EINVAL;

    timer = &mvotg->otg_ctrl.timer[id];
    if (timer_pending(timer)) {
        dev_err(&mvotg->pdev->dev, "Timer%d is already running\n", id);
        return -EBUSY;
    }

    init_timer(timer);
    timer->data = (unsigned long) mvotg;
    timer->function = callback;
    timer->expires = jiffies + interval;
    add_timer(timer);

    return 0;
}
/*
 * Stop and reset the USB controller, then clear all interrupt enables
 * and any pending status bits.
 *
 * Returns 0, or -ETIMEDOUT if the controller does not clear its reset
 * bit within ~10ms (500 polls x 20us).
 */
static int mv_otg_reset(struct mv_otg *mvotg)
{
    unsigned int loops;
    u32 tmp;

    /* Stop the controller */
    tmp = readl(&mvotg->op_regs->usbcmd);
    tmp &= ~USBCMD_RUN_STOP;
    writel(tmp, &mvotg->op_regs->usbcmd);

    /* Reset the controller to get default values */
    writel(USBCMD_CTRL_RESET, &mvotg->op_regs->usbcmd);

    loops = 500;
    /* The hardware clears the reset bit when the reset completes. */
    while (readl(&mvotg->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
        if (loops == 0) {
            dev_err(&mvotg->pdev->dev,
                    "Wait for RESET completed TIMEOUT\n");
            return -ETIMEDOUT;
        }
        loops--;
        udelay(20);
    }

    /* Disable all interrupts and acknowledge anything still pending. */
    writel(0x0, &mvotg->op_regs->usbintr);
    tmp = readl(&mvotg->op_regs->usbsts);
    writel(tmp, &mvotg->op_regs->usbsts);

    return 0;
}
/*
 * Compute which OTGSC interrupts this driver needs and enable them.
 *
 * Session-valid/session-end and ID interrupts are only taken from the
 * controller when the platform does not provide external VBUS/ID
 * detection (pdata->vbus / pdata->id); external pins deliver their own
 * interrupts via mv_otg_inputs_irq().
 */
static void mv_otg_init_irq(struct mv_otg *mvotg)
{
    u32 otgsc;

    mvotg->irq_en = OTGSC_INTR_A_SESSION_VALID
        | OTGSC_INTR_A_VBUS_VALID;
    mvotg->irq_status = OTGSC_INTSTS_A_SESSION_VALID
        | OTGSC_INTSTS_A_VBUS_VALID;

    if (mvotg->pdata->vbus == NULL) {
        mvotg->irq_en |= OTGSC_INTR_B_SESSION_VALID
            | OTGSC_INTR_B_SESSION_END;
        mvotg->irq_status |= OTGSC_INTSTS_B_SESSION_VALID
            | OTGSC_INTSTS_B_SESSION_END;
    }

    if (mvotg->pdata->id == NULL) {
        mvotg->irq_en |= OTGSC_INTR_USB_ID;
        mvotg->irq_status |= OTGSC_INTSTS_USB_ID;
    }

    /* Merge our enables into the current OTGSC value. */
    otgsc = readl(&mvotg->op_regs->otgsc);
    otgsc |= mvotg->irq_en;
    writel(otgsc, &mvotg->op_regs->otgsc);
}
/*
 * Add or remove the host controller (HCD) when entering/leaving host
 * mode.  Compiled out entirely when CONFIG_USB (host support) is off.
 *
 * NOTE(review): the usb_add_hcd() return value is ignored, so a failed
 * host start is silent — confirm whether callers can tolerate that.
 */
static void mv_otg_start_host(struct mv_otg *mvotg, int on)
{
#ifdef CONFIG_USB
    struct usb_otg *otg = mvotg->phy.otg;
    struct usb_hcd *hcd;

    /* Nothing to do until a host controller has been bound. */
    if (!otg->host)
        return;

    dev_info(&mvotg->pdev->dev, "%s host\n", on ? "start" : "stop");

    hcd = bus_to_hcd(otg->host);

    if (on)
        usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
    else
        usb_remove_hcd(hcd);
#endif /* CONFIG_USB */
}
/*
 * Signal a VBUS connect/disconnect to the bound gadget driver when
 * entering/leaving peripheral mode.  No-op until a gadget is bound.
 * (The "periphrals" misspelling is kept: the name is referenced
 * throughout this file.)
 */
static void mv_otg_start_periphrals(struct mv_otg *mvotg, int on)
{
    struct usb_otg *otg = mvotg->phy.otg;

    if (!otg->gadget)
        return;

    dev_info(mvotg->phy.dev, "gadget %s\n", on ? "on" : "off");

    if (on)
        usb_gadget_vbus_connect(otg->gadget);
    else
        usb_gadget_vbus_disconnect(otg->gadget);
}
/* Enable the OTG controller clock (prepare + enable in one call). */
static void otg_clock_enable(struct mv_otg *mvotg)
{
    clk_prepare_enable(mvotg->clk);
}

/* Disable the OTG controller clock (disable + unprepare in one call). */
static void otg_clock_disable(struct mv_otg *mvotg)
{
    clk_disable_unprepare(mvotg->clk);
}
/*
 * Power up the OTG block unconditionally: enable the clock, run the
 * optional platform PHY init, and mark the block active.  Idempotent —
 * returns 0 immediately if already active.  On PHY init failure the
 * clock is re-disabled and the error is returned.
 */
static int mv_otg_enable_internal(struct mv_otg *mvotg)
{
    int retval = 0;

    if (mvotg->active)
        return 0;

    dev_dbg(&mvotg->pdev->dev, "otg enabled\n");

    otg_clock_enable(mvotg);
    if (mvotg->pdata->phy_init) {
        retval = mvotg->pdata->phy_init(mvotg->phy_regs);
        if (retval) {
            dev_err(&mvotg->pdev->dev,
                    "init phy error %d\n", retval);
            /* Undo the clock enable so state stays consistent. */
            otg_clock_disable(mvotg);
            return retval;
        }
    }
    mvotg->active = 1;

    return 0;
}
/*
 * Conditionally power up the OTG block: only meaningful when clock
 * gating is in use (otherwise the block is kept enabled permanently).
 */
static int mv_otg_enable(struct mv_otg *mvotg)
{
    if (!mvotg->clock_gating)
        return 0;

    return mv_otg_enable_internal(mvotg);
}
/*
 * Power down the OTG block unconditionally: run the optional platform
 * PHY deinit, disable the clock, and clear the active flag.  Idempotent
 * — does nothing when the block is already inactive.
 */
static void mv_otg_disable_internal(struct mv_otg *mvotg)
{
    if (mvotg->active) {
        dev_dbg(&mvotg->pdev->dev, "otg disabled\n");
        if (mvotg->pdata->phy_deinit)
            mvotg->pdata->phy_deinit(mvotg->phy_regs);
        otg_clock_disable(mvotg);
        mvotg->active = 0;
    }
}
/*
 * Conditionally power down the OTG block: only done when clock gating
 * is in use; otherwise the block must stay enabled.
 */
static void mv_otg_disable(struct mv_otg *mvotg)
{
    if (!mvotg->clock_gating)
        return;

    mv_otg_disable_internal(mvotg);
}
/*
 * Sample all state-machine inputs (VBUS session, session end, ID pin,
 * A-side VBUS) into mvotg->otg_ctrl.  External pdata->vbus / pdata->id
 * poll callbacks take precedence over the controller's OTGSC status
 * bits when the platform provides them.
 */
static void mv_otg_update_inputs(struct mv_otg *mvotg)
{
    struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
    u32 otgsc;

    otgsc = readl(&mvotg->op_regs->otgsc);

    if (mvotg->pdata->vbus) {
        /* External VBUS detection pin. */
        if (mvotg->pdata->vbus->poll() == VBUS_HIGH) {
            otg_ctrl->b_sess_vld = 1;
            otg_ctrl->b_sess_end = 0;
        } else {
            otg_ctrl->b_sess_vld = 0;
            otg_ctrl->b_sess_end = 1;
        }
    } else {
        otg_ctrl->b_sess_vld = !!(otgsc & OTGSC_STS_B_SESSION_VALID);
        otg_ctrl->b_sess_end = !!(otgsc & OTGSC_STS_B_SESSION_END);
    }

    if (mvotg->pdata->id)
        otg_ctrl->id = !!mvotg->pdata->id->poll();
    else
        otg_ctrl->id = !!(otgsc & OTGSC_STS_USB_ID);

    /* Optionally auto-request the bus when the ID pin says "A-device". */
    if (mvotg->pdata->otg_force_a_bus_req && !otg_ctrl->id)
        otg_ctrl->a_bus_req = 1;

    otg_ctrl->a_sess_vld = !!(otgsc & OTGSC_STS_A_SESSION_VALID);
    otg_ctrl->a_vbus_vld = !!(otgsc & OTGSC_STS_A_VBUS_VALID);

    dev_dbg(&mvotg->pdev->dev, "%s: ", __func__);
    dev_dbg(&mvotg->pdev->dev, "id %d\n", otg_ctrl->id);
    dev_dbg(&mvotg->pdev->dev, "b_sess_vld %d\n", otg_ctrl->b_sess_vld);
    dev_dbg(&mvotg->pdev->dev, "b_sess_end %d\n", otg_ctrl->b_sess_end);
    dev_dbg(&mvotg->pdev->dev, "a_vbus_vld %d\n", otg_ctrl->a_vbus_vld);
    dev_dbg(&mvotg->pdev->dev, "a_sess_vld %d\n", otg_ctrl->a_sess_vld);
}
/*
 * Compute the next OTG state from the inputs sampled by
 * mv_otg_update_inputs().  Pure transition logic: only phy->state and a
 * few otg_ctrl request flags are modified here; the actions for each
 * transition are performed afterwards by mv_otg_work().
 */
static void mv_otg_update_state(struct mv_otg *mvotg)
{
    struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
    struct usb_phy *phy = &mvotg->phy;
    int old_state = phy->state;

    switch (old_state) {
    case OTG_STATE_UNDEFINED:
        phy->state = OTG_STATE_B_IDLE;
        /* FALL THROUGH */
    case OTG_STATE_B_IDLE:
        /* ID low means we are the A-device (host side). */
        if (otg_ctrl->id == 0)
            phy->state = OTG_STATE_A_IDLE;
        else if (otg_ctrl->b_sess_vld)
            phy->state = OTG_STATE_B_PERIPHERAL;
        break;
    case OTG_STATE_B_PERIPHERAL:
        if (!otg_ctrl->b_sess_vld || otg_ctrl->id == 0)
            phy->state = OTG_STATE_B_IDLE;
        break;
    case OTG_STATE_A_IDLE:
        if (otg_ctrl->id)
            phy->state = OTG_STATE_B_IDLE;
        else if (!(otg_ctrl->a_bus_drop) &&
                 (otg_ctrl->a_bus_req || otg_ctrl->a_srp_det))
            phy->state = OTG_STATE_A_WAIT_VRISE;
        break;
    case OTG_STATE_A_WAIT_VRISE:
        if (otg_ctrl->a_vbus_vld)
            phy->state = OTG_STATE_A_WAIT_BCON;
        break;
    case OTG_STATE_A_WAIT_BCON:
        /*
         * Every exit from A_WAIT_BCON cancels the B-connect
         * timeout timer and clears its flag.
         */
        if (otg_ctrl->id || otg_ctrl->a_bus_drop
            || otg_ctrl->a_wait_bcon_timeout) {
            mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
            mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
            phy->state = OTG_STATE_A_WAIT_VFALL;
            otg_ctrl->a_bus_req = 0;
        } else if (!otg_ctrl->a_vbus_vld) {
            mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
            mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
            phy->state = OTG_STATE_A_VBUS_ERR;
        } else if (otg_ctrl->b_conn) {
            mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
            mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
            phy->state = OTG_STATE_A_HOST;
        }
        break;
    case OTG_STATE_A_HOST:
        if (otg_ctrl->id || !otg_ctrl->b_conn
            || otg_ctrl->a_bus_drop)
            phy->state = OTG_STATE_A_WAIT_BCON;
        else if (!otg_ctrl->a_vbus_vld)
            phy->state = OTG_STATE_A_VBUS_ERR;
        break;
    case OTG_STATE_A_WAIT_VFALL:
        if (otg_ctrl->id
            || (!otg_ctrl->b_conn && otg_ctrl->a_sess_vld)
            || otg_ctrl->a_bus_req)
            phy->state = OTG_STATE_A_IDLE;
        break;
    case OTG_STATE_A_VBUS_ERR:
        if (otg_ctrl->id || otg_ctrl->a_clr_err
            || otg_ctrl->a_bus_drop) {
            otg_ctrl->a_clr_err = 0;
            phy->state = OTG_STATE_A_WAIT_VFALL;
        }
        break;
    default:
        break;
    }
}
/*
 * Workqueue handler: run the OTG state machine until it stabilizes.
 *
 * Each pass samples the inputs, computes the next state, and — if the
 * state changed — performs the entry actions for the new state, then
 * loops (goto run) so chained transitions resolve in a single
 * invocation.  The queue is single-threaded, so no lock is needed here
 * (see inline comment).
 */
static void mv_otg_work(struct work_struct *work)
{
    struct mv_otg *mvotg;
    struct usb_phy *phy;
    struct usb_otg *otg;
    int old_state;

    mvotg = container_of(to_delayed_work(work), struct mv_otg, work);

run:
    /* work queue is single thread, or we need spin_lock to protect */
    phy = &mvotg->phy;
    otg = phy->otg;
    old_state = phy->state;

    /* Block is powered down — nothing can be sampled or driven. */
    if (!mvotg->active)
        return;

    mv_otg_update_inputs(mvotg);
    mv_otg_update_state(mvotg);

    if (old_state != phy->state) {
        dev_info(&mvotg->pdev->dev, "change from state %s to %s\n",
                 state_string[old_state],
                 state_string[phy->state]);

        switch (phy->state) {
        case OTG_STATE_B_IDLE:
            otg->default_a = 0;
            if (old_state == OTG_STATE_B_PERIPHERAL)
                mv_otg_start_periphrals(mvotg, 0);
            mv_otg_reset(mvotg);
            mv_otg_disable(mvotg);
            break;
        case OTG_STATE_B_PERIPHERAL:
            mv_otg_enable(mvotg);
            mv_otg_start_periphrals(mvotg, 1);
            break;
        case OTG_STATE_A_IDLE:
            otg->default_a = 1;
            mv_otg_enable(mvotg);
            if (old_state == OTG_STATE_A_WAIT_VFALL)
                mv_otg_start_host(mvotg, 0);
            mv_otg_reset(mvotg);
            break;
        case OTG_STATE_A_WAIT_VRISE:
            mv_otg_set_vbus(otg, 1);
            break;
        case OTG_STATE_A_WAIT_BCON:
            if (old_state != OTG_STATE_A_HOST)
                mv_otg_start_host(mvotg, 1);
            mv_otg_set_timer(mvotg, A_WAIT_BCON_TIMER,
                             T_A_WAIT_BCON,
                             mv_otg_timer_await_bcon);
            /*
             * Now, we directly enter A_HOST. So set b_conn = 1
             * here. In fact, it need host driver to notify us.
             */
            mvotg->otg_ctrl.b_conn = 1;
            break;
        case OTG_STATE_A_HOST:
            break;
        case OTG_STATE_A_WAIT_VFALL:
            /*
             * Now, we has exited A_HOST. So set b_conn = 0
             * here. In fact, it need host driver to notify us.
             */
            mvotg->otg_ctrl.b_conn = 0;
            mv_otg_set_vbus(otg, 0);
            break;
        case OTG_STATE_A_VBUS_ERR:
            break;
        default:
            break;
        }
        /* State changed: re-run until it settles. */
        goto run;
    }
}
/*
 * Controller (OTGSC) interrupt handler: acknowledge all pending OTGSC
 * status bits by writing them back, then kick the state machine if any
 * bit we care about fired.  ID-only events are ignored when an external
 * VBUS pin is used (those are handled by mv_otg_inputs_irq()).
 */
static irqreturn_t mv_otg_irq(int irq, void *dev)
{
    struct mv_otg *mvotg = dev;
    u32 otgsc;

    otgsc = readl(&mvotg->op_regs->otgsc);
    /* Write-1-to-clear the interrupt status bits. */
    writel(otgsc, &mvotg->op_regs->otgsc);

    /*
     * if we have vbus, then the vbus detection for B-device
     * will be done by mv_otg_inputs_irq().
     */
    if (mvotg->pdata->vbus)
        if ((otgsc & OTGSC_STS_USB_ID) &&
            !(otgsc & OTGSC_INTSTS_USB_ID))
            return IRQ_NONE;

    if ((otgsc & mvotg->irq_status) == 0)
        return IRQ_NONE;

    mv_otg_run_state_machine(mvotg, 0);

    return IRQ_HANDLED;
}
/*
 * Threaded handler for the external VBUS/ID detection pins.  The OTG
 * block may have been clock-gated off, so re-enable it and re-arm the
 * OTGSC interrupts before running the state machine.
 */
static irqreturn_t mv_otg_inputs_irq(int irq, void *dev)
{
    struct mv_otg *mvotg = dev;

    /* The clock may disabled at this time */
    if (!mvotg->active) {
        mv_otg_enable(mvotg);
        mv_otg_init_irq(mvotg);
    }

    mv_otg_run_state_machine(mvotg, 0);

    return IRQ_HANDLED;
}
/* Sysfs show for "a_bus_req": current A-device bus request flag. */
static ssize_t
get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct mv_otg *otg_dev = dev_get_drvdata(dev);

    return scnprintf(buf, PAGE_SIZE, "%d\n", otg_dev->otg_ctrl.a_bus_req);
}
/*
 * Sysfs store for "a_bus_req": writing '1' requests the bus as the
 * A-device and kicks the state machine.  Only honored from the B_IDLE
 * or A_IDLE state.  Any other input is silently accepted (count is
 * returned) to preserve existing userspace behavior.
 *
 * Fix: error paths used to "return -1", which the VFS reports to
 * userspace as -EPERM; return -EINVAL to correctly signal bad input /
 * wrong state (kernel sysfs store convention).
 */
static ssize_t
set_a_bus_req(struct device *dev, struct device_attribute *attr,
              const char *buf, size_t count)
{
    struct mv_otg *mvotg = dev_get_drvdata(dev);

    /* Expect at most one character plus an optional trailing newline. */
    if (count > 2)
        return -EINVAL;

    /* We will use this interface to change to A device */
    if (mvotg->phy.state != OTG_STATE_B_IDLE
        && mvotg->phy.state != OTG_STATE_A_IDLE)
        return -EINVAL;

    /* The clock may disabled and we need to set irq for ID detected */
    mv_otg_enable(mvotg);
    mv_otg_init_irq(mvotg);

    if (buf[0] == '1') {
        mvotg->otg_ctrl.a_bus_req = 1;
        mvotg->otg_ctrl.a_bus_drop = 0;
        dev_dbg(&mvotg->pdev->dev,
                "User request: a_bus_req = 1\n");

        /* Run the state machine unless it is already being run. */
        if (spin_trylock(&mvotg->wq_lock)) {
            mv_otg_run_state_machine(mvotg, 0);
            spin_unlock(&mvotg->wq_lock);
        }
    }

    return count;
}
static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUSR, get_a_bus_req,
set_a_bus_req);
/*
 * Sysfs store for "a_clr_err": writing '1' requests clearing the
 * a_vbus_err condition; the state machine is kicked either way.
 * Only meaningful when operating as the default A-device.
 *
 * Fix: error paths used to "return -1", which the VFS reports to
 * userspace as -EPERM; return -EINVAL to correctly signal bad input /
 * wrong role (kernel sysfs store convention).
 */
static ssize_t
set_a_clr_err(struct device *dev, struct device_attribute *attr,
              const char *buf, size_t count)
{
    struct mv_otg *mvotg = dev_get_drvdata(dev);

    if (!mvotg->phy.otg->default_a)
        return -EINVAL;

    /* Expect at most one character plus an optional trailing newline. */
    if (count > 2)
        return -EINVAL;

    if (buf[0] == '1') {
        mvotg->otg_ctrl.a_clr_err = 1;
        dev_dbg(&mvotg->pdev->dev,
                "User request: a_clr_err = 1\n");
    }

    /* Run the state machine unless it is already being run. */
    if (spin_trylock(&mvotg->wq_lock)) {
        mv_otg_run_state_machine(mvotg, 0);
        spin_unlock(&mvotg->wq_lock);
    }

    return count;
}
static DEVICE_ATTR(a_clr_err, S_IWUSR, NULL, set_a_clr_err);
/* Sysfs show for "a_bus_drop": current A-device bus drop flag. */
static ssize_t
get_a_bus_drop(struct device *dev, struct device_attribute *attr,
               char *buf)
{
    struct mv_otg *otg_dev = dev_get_drvdata(dev);

    return scnprintf(buf, PAGE_SIZE, "%d\n", otg_dev->otg_ctrl.a_bus_drop);
}
/*
 * Sysfs store for "a_bus_drop": '1' drops the bus (and clears any bus
 * request), '0' re-allows it; the state machine is kicked either way.
 * Only meaningful when operating as the default A-device.
 *
 * Fix: error paths used to "return -1", which the VFS reports to
 * userspace as -EPERM; return -EINVAL to correctly signal bad input /
 * wrong role (kernel sysfs store convention).
 */
static ssize_t
set_a_bus_drop(struct device *dev, struct device_attribute *attr,
               const char *buf, size_t count)
{
    struct mv_otg *mvotg = dev_get_drvdata(dev);

    if (!mvotg->phy.otg->default_a)
        return -EINVAL;

    /* Expect at most one character plus an optional trailing newline. */
    if (count > 2)
        return -EINVAL;

    if (buf[0] == '0') {
        mvotg->otg_ctrl.a_bus_drop = 0;
        dev_dbg(&mvotg->pdev->dev,
                "User request: a_bus_drop = 0\n");
    } else if (buf[0] == '1') {
        mvotg->otg_ctrl.a_bus_drop = 1;
        mvotg->otg_ctrl.a_bus_req = 0;
        dev_dbg(&mvotg->pdev->dev,
                "User request: a_bus_drop = 1\n");
        dev_dbg(&mvotg->pdev->dev,
                "User request: and a_bus_req = 0\n");
    }

    /* Run the state machine unless it is already being run. */
    if (spin_trylock(&mvotg->wq_lock)) {
        mv_otg_run_state_machine(mvotg, 0);
        spin_unlock(&mvotg->wq_lock);
    }

    return count;
}
static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUSR,
get_a_bus_drop, set_a_bus_drop);
/* Sysfs attributes exposed under the "inputs" group on the device. */
static struct attribute *inputs_attrs[] = {
    &dev_attr_a_bus_req.attr,
    &dev_attr_a_clr_err.attr,
    &dev_attr_a_bus_drop.attr,
    NULL,
};

static struct attribute_group inputs_attr_group = {
    .name = "inputs",
    .attrs = inputs_attrs,
};
/*
 * Platform driver remove: tear down in reverse-probe order — sysfs
 * group, workqueue (flushed before destruction so no work is left
 * running), the OTG block power, and the registered transceiver.
 * devm-managed resources (irqs, ioremaps, allocations) are released
 * automatically by the driver core.
 */
int mv_otg_remove(struct platform_device *pdev)
{
    struct mv_otg *mvotg = platform_get_drvdata(pdev);

    sysfs_remove_group(&mvotg->pdev->dev.kobj, &inputs_attr_group);

    if (mvotg->qwork) {
        flush_workqueue(mvotg->qwork);
        destroy_workqueue(mvotg->qwork);
    }

    mv_otg_disable(mvotg);

    usb_remove_phy(&mvotg->phy);

    return 0;
}
/*
 * Platform driver probe: allocate driver state, map the PHY/capability
 * register windows, power up the block, wire up the optional external
 * ID/VBUS detection interrupts, register the transceiver and the sysfs
 * "inputs" group, and schedule a first state-machine pass after 2s.
 *
 * Error handling uses the goto-cleanup pattern; devm_* resources are
 * released automatically by the driver core.
 */
static int mv_otg_probe(struct platform_device *pdev)
{
    struct mv_usb_platform_data *pdata = pdev->dev.platform_data;
    struct mv_otg *mvotg;
    struct usb_otg *otg;
    struct resource *r;
    int retval = 0, i;

    if (pdata == NULL) {
        dev_err(&pdev->dev, "failed to get platform data\n");
        return -ENODEV;
    }

    mvotg = devm_kzalloc(&pdev->dev, sizeof(*mvotg), GFP_KERNEL);
    if (!mvotg) {
        dev_err(&pdev->dev, "failed to allocate memory!\n");
        return -ENOMEM;
    }

    otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
    if (!otg)
        return -ENOMEM;

    platform_set_drvdata(pdev, mvotg);

    mvotg->pdev = pdev;
    mvotg->pdata = pdata;

    mvotg->clk = devm_clk_get(&pdev->dev, NULL);
    if (IS_ERR(mvotg->clk))
        return PTR_ERR(mvotg->clk);

    mvotg->qwork = create_singlethread_workqueue("mv_otg_queue");
    if (!mvotg->qwork) {
        dev_dbg(&pdev->dev, "cannot create workqueue for OTG\n");
        return -ENOMEM;
    }

    INIT_DELAYED_WORK(&mvotg->work, mv_otg_work);

    /* OTG common part */
    mvotg->pdev = pdev;
    mvotg->phy.dev = &pdev->dev;
    mvotg->phy.otg = otg;
    mvotg->phy.label = driver_name;
    mvotg->phy.state = OTG_STATE_UNDEFINED;

    otg->phy = &mvotg->phy;
    otg->set_host = mv_otg_set_host;
    otg->set_peripheral = mv_otg_set_peripheral;
    otg->set_vbus = mv_otg_set_vbus;

    /* Prepare all timer slots so set/cancel can safely inspect them. */
    for (i = 0; i < OTG_TIMER_NUM; i++)
        init_timer(&mvotg->otg_ctrl.timer[i]);

    r = platform_get_resource_byname(mvotg->pdev,
                                     IORESOURCE_MEM, "phyregs");
    if (r == NULL) {
        dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
        retval = -ENODEV;
        goto err_destroy_workqueue;
    }

    mvotg->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
    if (mvotg->phy_regs == NULL) {
        dev_err(&pdev->dev, "failed to map phy I/O memory\n");
        retval = -EFAULT;
        goto err_destroy_workqueue;
    }

    r = platform_get_resource_byname(mvotg->pdev,
                                     IORESOURCE_MEM, "capregs");
    if (r == NULL) {
        dev_err(&pdev->dev, "no I/O memory resource defined\n");
        retval = -ENODEV;
        goto err_destroy_workqueue;
    }

    mvotg->cap_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
    if (mvotg->cap_regs == NULL) {
        dev_err(&pdev->dev, "failed to map I/O memory\n");
        retval = -EFAULT;
        goto err_destroy_workqueue;
    }

    /* we will acces controller register, so enable the udc controller */
    retval = mv_otg_enable_internal(mvotg);
    if (retval) {
        dev_err(&pdev->dev, "mv otg enable error %d\n", retval);
        goto err_destroy_workqueue;
    }

    /* Operational registers start CAPLENGTH bytes past the cap window. */
    mvotg->op_regs =
        (struct mv_otg_regs __iomem *) ((unsigned long) mvotg->cap_regs
                + (readl(mvotg->cap_regs) & CAPLENGTH_MASK));

    /* Optional external ID pin: fall back to OTGSC on request failure. */
    if (pdata->id) {
        retval = devm_request_threaded_irq(&pdev->dev, pdata->id->irq,
                                           NULL, mv_otg_inputs_irq,
                                           IRQF_ONESHOT, "id", mvotg);
        if (retval) {
            dev_info(&pdev->dev,
                     "Failed to request irq for ID\n");
            pdata->id = NULL;
        }
    }

    /* External VBUS pin enables clock gating; fall back on failure. */
    if (pdata->vbus) {
        mvotg->clock_gating = 1;
        retval = devm_request_threaded_irq(&pdev->dev, pdata->vbus->irq,
                                           NULL, mv_otg_inputs_irq,
                                           IRQF_ONESHOT, "vbus", mvotg);
        if (retval) {
            dev_info(&pdev->dev,
                     "Failed to request irq for VBUS, "
                     "disable clock gating\n");
            mvotg->clock_gating = 0;
            pdata->vbus = NULL;
        }
    }

    if (pdata->disable_otg_clock_gating)
        mvotg->clock_gating = 0;

    mv_otg_reset(mvotg);
    mv_otg_init_irq(mvotg);

    r = platform_get_resource(mvotg->pdev, IORESOURCE_IRQ, 0);
    if (r == NULL) {
        dev_err(&pdev->dev, "no IRQ resource defined\n");
        retval = -ENODEV;
        goto err_disable_clk;
    }

    mvotg->irq = r->start;
    if (devm_request_irq(&pdev->dev, mvotg->irq, mv_otg_irq, IRQF_SHARED,
                         driver_name, mvotg)) {
        dev_err(&pdev->dev, "Request irq %d for OTG failed\n",
                mvotg->irq);
        mvotg->irq = 0;
        retval = -ENODEV;
        goto err_disable_clk;
    }

    retval = usb_add_phy(&mvotg->phy, USB_PHY_TYPE_USB2);
    if (retval < 0) {
        dev_err(&pdev->dev, "can't register transceiver, %d\n",
                retval);
        goto err_disable_clk;
    }

    retval = sysfs_create_group(&pdev->dev.kobj, &inputs_attr_group);
    if (retval < 0) {
        dev_dbg(&pdev->dev,
                "Can't register sysfs attr group: %d\n", retval);
        goto err_remove_phy;
    }

    spin_lock_init(&mvotg->wq_lock);
    /* Kick off the first state-machine pass 2 seconds from now. */
    if (spin_trylock(&mvotg->wq_lock)) {
        mv_otg_run_state_machine(mvotg, 2 * HZ);
        spin_unlock(&mvotg->wq_lock);
    }

    dev_info(&pdev->dev,
             "successful probe OTG device %s clock gating.\n",
             mvotg->clock_gating ? "with" : "without");

    return 0;

err_remove_phy:
    usb_remove_phy(&mvotg->phy);
err_disable_clk:
    mv_otg_disable_internal(mvotg);
err_destroy_workqueue:
    flush_workqueue(mvotg->qwork);
    destroy_workqueue(mvotg->qwork);

    return retval;
}
#ifdef CONFIG_PM
/*
 * Suspend: only allowed from B_IDLE (any other state means a session
 * is active).  When clock gating is off the block is still powered, so
 * it must be explicitly disabled here; with gating on it is already
 * off while idle.
 */
static int mv_otg_suspend(struct platform_device *pdev, pm_message_t state)
{
    struct mv_otg *mvotg = platform_get_drvdata(pdev);

    if (mvotg->phy.state != OTG_STATE_B_IDLE) {
        dev_info(&pdev->dev,
                 "OTG state is not B_IDLE, it is %d!\n",
                 mvotg->phy.state);
        return -EAGAIN;
    }

    if (!mvotg->clock_gating)
        mv_otg_disable_internal(mvotg);

    return 0;
}
/*
 * Resume: mirror of mv_otg_suspend() — when clock gating is off, power
 * the block back up, restore the OTGSC interrupt enables, and run the
 * state machine once to pick up anything that changed during suspend.
 */
static int mv_otg_resume(struct platform_device *pdev)
{
    struct mv_otg *mvotg = platform_get_drvdata(pdev);
    u32 otgsc;

    if (!mvotg->clock_gating) {
        mv_otg_enable_internal(mvotg);

        otgsc = readl(&mvotg->op_regs->otgsc);
        otgsc |= mvotg->irq_en;
        writel(otgsc, &mvotg->op_regs->otgsc);

        if (spin_trylock(&mvotg->wq_lock)) {
            mv_otg_run_state_machine(mvotg, 0);
            spin_unlock(&mvotg->wq_lock);
        }
    }
    return 0;
}
#endif
static struct platform_driver mv_otg_driver = {
    .probe = mv_otg_probe,
    /*
     * Fix: mv_otg_remove() is not marked __exit, so wrapping it in
     * __exit_p() wrongly turned the remove callback into NULL for
     * built-in configurations, breaking device unbind.  Reference the
     * function directly.
     */
    .remove = mv_otg_remove,
    .driver = {
        .owner = THIS_MODULE,
        .name = driver_name,
    },
#ifdef CONFIG_PM
    .suspend = mv_otg_suspend,
    .resume = mv_otg_resume,
#endif
};
module_platform_driver(mv_otg_driver);
| gpl-2.0 |
s9yobena/linux-btrfs | sound/pci/ctxfi/cthardware.c | 2395 | 1675 | /**
* Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
*
* This source file is released under GPL v2 license (no other versions).
* See the COPYING file included in the main directory of this source
* distribution for the license terms and conditions.
*
* @File cthardware.c
*
* @Brief
* This file contains the implementation of hardware access methord.
*
* @Author Liu Chun
* @Date Jun 26 2008
*
*/
#include "cthardware.h"
#include "cthw20k1.h"
#include "cthw20k2.h"
#include <linux/bug.h>
/*
 * Create the chip-specific hw object for @chip_type and record the PCI
 * device, chip type and card model on it.  Returns 0 on success,
 * -ENODEV for an unknown chip type, or the constructor's error.
 */
int create_hw_obj(struct pci_dev *pci, enum CHIPTYP chip_type,
                  enum CTCARDS model, struct hw **rhw)
{
    int err;

    /* Dispatch to the generation-specific constructor. */
    if (chip_type == ATC20K1)
        err = create_20k1_hw_obj(rhw);
    else if (chip_type == ATC20K2)
        err = create_20k2_hw_obj(rhw);
    else
        err = -ENODEV;

    if (err)
        return err;

    /* Stamp identification data onto the freshly created object. */
    (*rhw)->pci = pci;
    (*rhw)->chip_type = chip_type;
    (*rhw)->model = model;

    return 0;
}
/*
 * Destroy a hw object, dispatching on the PCI device ID to the matching
 * generation-specific destructor.  Returns 0 or -ENODEV for an unknown
 * device ID.
 */
int destroy_hw_obj(struct hw *hw)
{
    unsigned int dev_id = hw->pci->device;
    int err;

    if (dev_id == 0x0005)           /* 20k1 device */
        err = destroy_20k1_hw_obj(hw);
    else if (dev_id == 0x000B)      /* 20k2 device */
        err = destroy_20k2_hw_obj(hw);
    else
        err = -ENODEV;

    return err;
}
/*
 * Extract the bits of @data selected by the mask @field, shifted down so
 * the result is right-aligned.  @field must be non-zero.
 */
unsigned int get_field(unsigned int data, unsigned int field)
{
    unsigned int shift = 0;

    BUG_ON(!field);
    /* @field should always be greater than 0 */
    while (!(field & (1 << shift)))
        shift++;

    return (data & field) >> shift;
}
/*
 * Store @value into the bits of *@data selected by the mask @field,
 * leaving all other bits untouched.  @field must be non-zero.
 */
void set_field(unsigned int *data, unsigned int field, unsigned int value)
{
    unsigned int shift = 0;

    BUG_ON(!field);
    /* @field should always be greater than 0 */
    while (!(field & (1 << shift)))
        shift++;

    *data = (*data & (~field)) | ((value << shift) & field);
}
| gpl-2.0 |
dheerajjamwal/LTSI_backports | arch/arm/mach-s3c24xx/clock-s3c2440.c | 2395 | 5920 | /* linux/arch/arm/mach-s3c2440/clock.c
*
* Copyright (c) 2004-2005 Simtec Electronics
* http://armlinux.simtec.co.uk/
* Ben Dooks <ben@simtec.co.uk>
*
* S3C2440 Clock support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/serial_core.h>
#include <mach/hardware.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <mach/regs-clock.h>
#include <plat/clock.h>
#include <plat/cpu.h>
#include <plat/regs-serial.h>
/* S3C2440 extended clock support */
/*
 * Round @rate to the nearest achievable CAMCLK frequency: the parent
 * rate divided by an even divisor in [2, 32].  Rates above the parent
 * rate are clamped to the parent rate itself.
 */
static unsigned long s3c2440_camif_upll_round(struct clk *clk,
                                              unsigned long rate)
{
    unsigned long parent_rate = clk_get_rate(clk->parent);
    int div;

    if (rate > parent_rate)
        return parent_rate;

    /* note, we remove the +/- 1 calculations for the divisor */
    div = (parent_rate / rate) / 2;

    if (div < 1)
        div = 1;
    if (div > 16)
        div = 16;

    return parent_rate / (div * 2);
}
/*
 * Program the CAMDIVN divider for the requested CAMCLK rate.  The rate
 * is first rounded via s3c2440_camif_upll_round(); if it matches the
 * parent rate exactly, the divider path is bypassed (CAMCLK_SEL clear).
 */
static int s3c2440_camif_upll_setrate(struct clk *clk, unsigned long rate)
{
    unsigned long parent_rate = clk_get_rate(clk->parent);
    unsigned long camdivn = __raw_readl(S3C2440_CAMDIVN);

    rate = s3c2440_camif_upll_round(clk, rate);

    camdivn &= ~(S3C2440_CAMDIVN_CAMCLK_SEL | S3C2440_CAMDIVN_CAMCLK_MASK);

    if (rate != parent_rate) {
        camdivn |= S3C2440_CAMDIVN_CAMCLK_SEL;
        /* Register encodes (divisor/2 - 1). */
        camdivn |= (((parent_rate / rate) / 2) - 1);
    }

    __raw_writel(camdivn, S3C2440_CAMDIVN);

    return 0;
}
/*
 * Read back the current CAMCLK rate from CAMDIVN.  When the divider
 * path is bypassed (CAMCLK_SEL clear) the rate equals the parent rate;
 * otherwise it is parent / (2 * (field + 1)).
 */
static unsigned long s3c2440_camif_upll_getrate(struct clk *clk)
{
    unsigned long parent_rate = clk_get_rate(clk->parent);
    unsigned long camdivn = __raw_readl(S3C2440_CAMDIVN);

    if (!(camdivn & S3C2440_CAMDIVN_CAMCLK_SEL))
        return parent_rate;

    camdivn &= S3C2440_CAMDIVN_CAMCLK_MASK;

    return parent_rate / (camdivn + 1) / 2;
}
/* Extra S3C2440 clocks */
static struct clk s3c2440_clk_cam = {
.name = "camif",
.enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2440_CLKCON_CAMERA,
};
static struct clk s3c2440_clk_cam_upll = {
.name = "camif-upll",
.ops = &(struct clk_ops) {
.set_rate = s3c2440_camif_upll_setrate,
.get_rate = s3c2440_camif_upll_getrate,
.round_rate = s3c2440_camif_upll_round,
},
};
static struct clk s3c2440_clk_ac97 = {
.name = "ac97",
.enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2440_CLKCON_AC97,
};
#define S3C24XX_VA_UART0 (S3C_VA_UART)
#define S3C24XX_VA_UART1 (S3C_VA_UART + 0x4000 )
#define S3C24XX_VA_UART2 (S3C_VA_UART + 0x8000 )
#define S3C24XX_VA_UART3 (S3C_VA_UART + 0xC000 )
/*
 * Compute the FCLK/n UART baud source rate.  The divisor is taken from
 * whichever UART's UCON divider field is non-zero, with per-UART base
 * offsets; when none are set the divisor defaults to 9 (see comment).
 */
static unsigned long s3c2440_fclk_n_getrate(struct clk *clk)
{
    unsigned long ucon0, ucon1, ucon2, divisor;

    /* the fun of calculating the uart divisors on the s3c2440 */
    ucon0 = __raw_readl(S3C24XX_VA_UART0 + S3C2410_UCON);
    ucon1 = __raw_readl(S3C24XX_VA_UART1 + S3C2410_UCON);
    ucon2 = __raw_readl(S3C24XX_VA_UART2 + S3C2410_UCON);

    ucon0 &= S3C2440_UCON0_DIVMASK;
    ucon1 &= S3C2440_UCON1_DIVMASK;
    ucon2 &= S3C2440_UCON2_DIVMASK;

    if (ucon0 != 0)
        divisor = (ucon0 >> S3C2440_UCON_DIVSHIFT) + 6;
    else if (ucon1 != 0)
        divisor = (ucon1 >> S3C2440_UCON_DIVSHIFT) + 21;
    else if (ucon2 != 0)
        divisor = (ucon2 >> S3C2440_UCON_DIVSHIFT) + 36;
    else
        /* manual claims 44, seems to be 9 */
        divisor = 9;

    return clk_get_rate(clk->parent) / divisor;
}
static struct clk s3c2440_clk_fclk_n = {
.name = "fclk_n",
.parent = &clk_f,
.ops = &(struct clk_ops) {
.get_rate = s3c2440_fclk_n_getrate,
},
};
static struct clk_lookup s3c2440_clk_lookup[] = {
CLKDEV_INIT(NULL, "clk_uart_baud1", &s3c24xx_uclk),
CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
CLKDEV_INIT(NULL, "clk_uart_baud3", &s3c2440_clk_fclk_n),
CLKDEV_INIT("s3c2440-uart.0", "uart", &s3c24xx_clk_uart0),
CLKDEV_INIT("s3c2440-uart.1", "uart", &s3c24xx_clk_uart1),
CLKDEV_INIT("s3c2440-uart.2", "uart", &s3c24xx_clk_uart2),
CLKDEV_INIT("s3c2440-camif", "camera", &s3c2440_clk_cam_upll),
};
/*
 * Subsystem-interface add callback: resolve the parent clocks, wire up
 * the S3C2440-specific clocks, register them plus the clkdev lookup
 * table, and leave the ac97/camif clocks gated until a consumer
 * enables them.
 *
 * NOTE(review): the clk_get() references are never clk_put() — on the
 * error path or otherwise.  The references are apparently held for the
 * life of the system; confirm against the plat-samsung clock code.
 */
static int __init_refok s3c2440_clk_add(struct device *dev, struct subsys_interface *sif)
{
    struct clk *clock_upll;
    struct clk *clock_h;
    struct clk *clock_p;

    clock_p = clk_get(NULL, "pclk");
    clock_h = clk_get(NULL, "hclk");
    clock_upll = clk_get(NULL, "upll");

    if (IS_ERR(clock_p) || IS_ERR(clock_h) || IS_ERR(clock_upll)) {
        printk(KERN_ERR "S3C2440: Failed to get parent clocks\n");
        return -EINVAL;
    }

    /* Attach the extra clocks to their parents. */
    s3c2440_clk_cam.parent = clock_h;
    s3c2440_clk_ac97.parent = clock_p;
    s3c2440_clk_cam_upll.parent = clock_upll;

    s3c24xx_register_clock(&s3c2440_clk_fclk_n);
    s3c24xx_register_clock(&s3c2440_clk_ac97);
    s3c24xx_register_clock(&s3c2440_clk_cam);
    s3c24xx_register_clock(&s3c2440_clk_cam_upll);

    clkdev_add_table(s3c2440_clk_lookup, ARRAY_SIZE(s3c2440_clk_lookup));

    /* Start with the gateable clocks off until a driver claims them. */
    clk_disable(&s3c2440_clk_ac97);
    clk_disable(&s3c2440_clk_cam);

    return 0;
}
static struct subsys_interface s3c2440_clk_interface = {
.name = "s3c2440_clk",
.subsys = &s3c2440_subsys,
.add_dev = s3c2440_clk_add,
};
static __init int s3c24xx_clk_init(void)
{
return subsys_interface_register(&s3c2440_clk_interface);
}
arch_initcall(s3c24xx_clk_init);
| gpl-2.0 |
friedrich420/SPRINT-Note-4-AEL-Kernel-Lollipop-Source | drivers/video/pm3fb.c | 2395 | 43407 | /*
* linux/drivers/video/pm3fb.c -- 3DLabs Permedia3 frame buffer device
*
* Copyright (C) 2001 Romain Dolbeau <romain@dolbeau.org>.
*
* Ported to 2.6 kernel on 1 May 2007 by Krzysztof Helt <krzysztof.h1@wp.pl>
* based on pm2fb.c
*
* Based on code written by:
* Sven Luther, <luther@dpt-info.u-strasbg.fr>
* Alan Hourihane, <alanh@fairlite.demon.co.uk>
* Russell King, <rmk@arm.linux.org.uk>
* Based on linux/drivers/video/skeletonfb.c:
* Copyright (C) 1997 Geert Uytterhoeven
* Based on linux/driver/video/pm2fb.c:
* Copyright (C) 1998-1999 Ilario Nardinocchi (nardinoc@CS.UniBO.IT)
* Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/pci.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
#include <video/pm3fb.h>
#if !defined(CONFIG_PCI)
#error "Only generic PCI cards supported."
#endif
#undef PM3FB_MASTER_DEBUG
#ifdef PM3FB_MASTER_DEBUG
#define DPRINTK(a, b...) \
printk(KERN_DEBUG "pm3fb: %s: " a, __func__ , ## b)
#else
#define DPRINTK(a, b...)
#endif
#define PM3_PIXMAP_SIZE (2048 * 4)
/*
* Driver data
*/
static int hwcursor = 1;
static char *mode_option;
static bool noaccel;
/* mtrr option */
#ifdef CONFIG_MTRR
static bool nomtrr;
#endif
/*
* This structure defines the hardware state of the graphics card. Normally
* you place this in a header file in linux/include/video. This file usually
* also includes register information. That allows other driver subsystems
* and userland applications the ability to use the same header file to
* avoid duplicate work and easy porting of software.
*/
struct pm3_par {
unsigned char __iomem *v_regs;/* virtual address of p_regs */
u32 video; /* video flags before blanking */
u32 base; /* screen base in 128 bits unit */
u32 palette[16];
int mtrr_handle;
};
/*
* Here we define the default structs fb_fix_screeninfo and fb_var_screeninfo
* if we don't use modedb. If we do use modedb see pm3fb_init how to use it
* to get a fb_var_screeninfo. Otherwise define a default var as well.
*/
static struct fb_fix_screeninfo pm3fb_fix = {
.id = "Permedia3",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
.xpanstep = 1,
.ypanstep = 1,
.ywrapstep = 0,
.accel = FB_ACCEL_3DLABS_PERMEDIA3,
};
/*
* Utility functions
*/
/* Read a 32-bit Permedia3 register at byte offset @off. */
static inline u32 PM3_READ_REG(struct pm3_par *par, s32 off)
{
    return fb_readl(par->v_regs + off);
}

/* Write a 32-bit value to the Permedia3 register at byte offset @off. */
static inline void PM3_WRITE_REG(struct pm3_par *par, s32 off, u32 v)
{
    fb_writel(v, par->v_regs + off);
}

/* Spin until the chip's input FIFO has room for at least @n entries. */
static inline void PM3_WAIT(struct pm3_par *par, u32 n)
{
    while (PM3_READ_REG(par, PM3InFIFOSpace) < n)
        cpu_relax();
}

/*
 * Write indexed RAMDAC register @r: the 16-bit index is split across
 * the IndexHigh/IndexLow registers before the data write.  Ordering of
 * the three writes matters, hence the barriers.
 */
static inline void PM3_WRITE_DAC_REG(struct pm3_par *par, unsigned r, u8 v)
{
    PM3_WAIT(par, 3);
    PM3_WRITE_REG(par, PM3RD_IndexHigh, (r >> 8) & 0xff);
    PM3_WRITE_REG(par, PM3RD_IndexLow, r & 0xff);
    wmb();
    PM3_WRITE_REG(par, PM3RD_IndexedData, v);
    wmb();
}

/*
 * Program palette entry @regno: the RAMDAC auto-increments through the
 * R, G, B components after the address write.
 */
static inline void pm3fb_set_color(struct pm3_par *par, unsigned char regno,
                unsigned char r, unsigned char g, unsigned char b)
{
    PM3_WAIT(par, 4);
    PM3_WRITE_REG(par, PM3RD_PaletteWriteAddress, regno);
    wmb();
    PM3_WRITE_REG(par, PM3RD_PaletteData, r);
    wmb();
    PM3_WRITE_REG(par, PM3RD_PaletteData, g);
    wmb();
    PM3_WRITE_REG(par, PM3RD_PaletteData, b);
    wmb();
}

/* Fill the entire 256-entry palette with one color. */
static void pm3fb_clear_colormap(struct pm3_par *par,
                unsigned char r, unsigned char g, unsigned char b)
{
    int i;

    for (i = 0; i < 256 ; i++)
        pm3fb_set_color(par, i, r, g, b);
}
/* Calculating various clock parameters */
/*
 * Exhaustively search feedback (1-255), prescale (1-255) and postscale
 * (0-4) PLL parameters for the combination whose output frequency is
 * closest to @reqclock, writing the winners through the out pointers.
 *
 * NOTE(review): the outputs are only assigned when some combination
 * comes within the initial 1000-unit error bound, and the intermediate
 * product 2*PM3_REF_CLOCK*f may exceed 32 bits for large f on 32-bit
 * builds — confirm the expected reqclock range keeps both safe.
 */
static void pm3fb_calculate_clock(unsigned long reqclock,
                unsigned char *prescale,
                unsigned char *feedback,
                unsigned char *postscale)
{
    int f, pre, post;
    unsigned long freq;
    long freqerr = 1000;
    long currerr;

    for (f = 1; f < 256; f++) {
        for (pre = 1; pre < 256; pre++) {
            for (post = 0; post < 5; post++) {
                freq = ((2*PM3_REF_CLOCK * f) >> post) / pre;
                currerr = (reqclock > freq)
                    ? reqclock - freq
                    : freq - reqclock;
                if (currerr < freqerr) {
                    freqerr = currerr;
                    *feedback = f;
                    *prescale = pre;
                    *postscale = post;
                }
            }
        }
    }
}
/*
 * Effective color depth of @var.  At 16 bpp the real depth is the sum of
 * the RGB field lengths (12, 15 or 16); otherwise it equals bits_per_pixel.
 */
static inline int pm3fb_depth(const struct fb_var_screeninfo *var)
{
	if (var->bits_per_pixel != 16)
		return var->bits_per_pixel;

	return var->red.length + var->green.length + var->blue.length;
}
/*
 * Convert @v from pixels to 128-bit units for the given depth:
 * divide by 16, 8 or 4 pixels per 128 bits at 8, 16 or 32 bpp.
 * Returns 0 for any unsupported depth.
 */
static inline int pm3fb_shift_bpp(unsigned bpp, int v)
{
	int shift;

	switch (bpp) {
	case 8:
		shift = 4;
		break;
	case 16:
		shift = 3;
		break;
	case 32:
		shift = 2;
		break;
	default:
		DPRINTK("Unsupported depth %u\n", bpp);
		return 0;
	}
	return v >> shift;
}
/* acceleration */
/*
 * Wait for the accelerator to go idle: queue a Sync token behind all
 * pending operations, then poll the output FIFO until that token's tag
 * comes back out.  Always returns 0 (fb_sync contract).
 */
static int pm3fb_sync(struct fb_info *info)
{
	struct pm3_par *par = info->par;

	PM3_WAIT(par, 2);
	PM3_WRITE_REG(par, PM3FilterMode, PM3FilterModeSync);
	PM3_WRITE_REG(par, PM3Sync, 0);
	mb();
	do {
		/* wait for something to appear in the output FIFO ... */
		while ((PM3_READ_REG(par, PM3OutFIFOWords)) == 0)
			cpu_relax();
		/* ... and drain entries until the sync tag shows up */
	} while ((PM3_READ_REG(par, PM3OutputFifo)) != PM3Sync_Tag);
	return 0;
}
/*
 * Put the 2D/3D core into a known state for accelerated fbdev operation:
 * disable every pipeline unit that could interfere, configure the frame
 * buffer read/write paths for the virtual width, set the rasterizer pixel
 * size and dither mode, and finish with a sync so the engine is idle.
 *
 * Fixes vs. original: the two "Unsupported depth" DPRINTK calls passed a
 * stray leading debug-level argument (DPRINTK here is format-first, see
 * its other uses in this file), and one of them dereferenced
 * info->current_par->depth — a member struct fb_info does not have, which
 * broke the build with debugging enabled.  Both now print
 * info->var.bits_per_pixel, matching the upstream kernel cleanup.
 */
static void pm3fb_init_engine(struct fb_info *info)
{
	struct pm3_par *par = info->par;
	/* engine width must be a multiple of 8 pixels */
	const u32 width = (info->var.xres_virtual + 7) & ~7;

	PM3_WAIT(par, 50);
	PM3_WRITE_REG(par, PM3FilterMode, PM3FilterModeSync);
	PM3_WRITE_REG(par, PM3StatisticMode, 0x0);
	PM3_WRITE_REG(par, PM3DeltaMode, 0x0);
	PM3_WRITE_REG(par, PM3RasterizerMode, 0x0);
	PM3_WRITE_REG(par, PM3ScissorMode, 0x0);
	PM3_WRITE_REG(par, PM3LineStippleMode, 0x0);
	PM3_WRITE_REG(par, PM3AreaStippleMode, 0x0);
	PM3_WRITE_REG(par, PM3GIDMode, 0x0);
	PM3_WRITE_REG(par, PM3DepthMode, 0x0);
	PM3_WRITE_REG(par, PM3StencilMode, 0x0);
	PM3_WRITE_REG(par, PM3StencilData, 0x0);
	PM3_WRITE_REG(par, PM3ColorDDAMode, 0x0);
	PM3_WRITE_REG(par, PM3TextureCoordMode, 0x0);
	PM3_WRITE_REG(par, PM3TextureIndexMode0, 0x0);
	PM3_WRITE_REG(par, PM3TextureIndexMode1, 0x0);
	PM3_WRITE_REG(par, PM3TextureReadMode, 0x0);
	PM3_WRITE_REG(par, PM3LUTMode, 0x0);
	PM3_WRITE_REG(par, PM3TextureFilterMode, 0x0);
	PM3_WRITE_REG(par, PM3TextureCompositeMode, 0x0);
	PM3_WRITE_REG(par, PM3TextureApplicationMode, 0x0);
	PM3_WRITE_REG(par, PM3TextureCompositeColorMode1, 0x0);
	PM3_WRITE_REG(par, PM3TextureCompositeAlphaMode1, 0x0);
	PM3_WRITE_REG(par, PM3TextureCompositeColorMode0, 0x0);
	PM3_WRITE_REG(par, PM3TextureCompositeAlphaMode0, 0x0);
	PM3_WRITE_REG(par, PM3FogMode, 0x0);
	PM3_WRITE_REG(par, PM3ChromaTestMode, 0x0);
	PM3_WRITE_REG(par, PM3AlphaTestMode, 0x0);
	PM3_WRITE_REG(par, PM3AntialiasMode, 0x0);
	PM3_WRITE_REG(par, PM3YUVMode, 0x0);
	PM3_WRITE_REG(par, PM3AlphaBlendColorMode, 0x0);
	PM3_WRITE_REG(par, PM3AlphaBlendAlphaMode, 0x0);
	PM3_WRITE_REG(par, PM3DitherMode, 0x0);
	PM3_WRITE_REG(par, PM3LogicalOpMode, 0x0);
	PM3_WRITE_REG(par, PM3RouterMode, 0x0);
	PM3_WRITE_REG(par, PM3Window, 0x0);

	PM3_WRITE_REG(par, PM3Config2D, 0x0);

	PM3_WRITE_REG(par, PM3SpanColorMask, 0xffffffff);

	PM3_WRITE_REG(par, PM3XBias, 0x0);
	PM3_WRITE_REG(par, PM3YBias, 0x0);
	PM3_WRITE_REG(par, PM3DeltaControl, 0x0);

	PM3_WRITE_REG(par, PM3BitMaskPattern, 0xffffffff);

	/* destination read path: enabled, buffer 0, virtual width */
	PM3_WRITE_REG(par, PM3FBDestReadEnables,
			   PM3FBDestReadEnables_E(0xff) |
			   PM3FBDestReadEnables_R(0xff) |
			   PM3FBDestReadEnables_ReferenceAlpha(0xff));
	PM3_WRITE_REG(par, PM3FBDestReadBufferAddr0, 0x0);
	PM3_WRITE_REG(par, PM3FBDestReadBufferOffset0, 0x0);
	PM3_WRITE_REG(par, PM3FBDestReadBufferWidth0,
			   PM3FBDestReadBufferWidth_Width(width));

	PM3_WRITE_REG(par, PM3FBDestReadMode,
			   PM3FBDestReadMode_ReadEnable |
			   PM3FBDestReadMode_Enable0);

	/* source read path for screen-to-screen copies */
	PM3_WRITE_REG(par, PM3FBSourceReadBufferAddr, 0x0);
	PM3_WRITE_REG(par, PM3FBSourceReadBufferOffset, 0x0);
	PM3_WRITE_REG(par, PM3FBSourceReadBufferWidth,
			   PM3FBSourceReadBufferWidth_Width(width));
	PM3_WRITE_REG(par, PM3FBSourceReadMode,
			   PM3FBSourceReadMode_Blocking |
			   PM3FBSourceReadMode_ReadEnable);

	PM3_WAIT(par, 2);
	{
		/* invert bits in bitmask */
		unsigned long rm = 1 | (3 << 7);

		switch (info->var.bits_per_pixel) {
		case 8:
			PM3_WRITE_REG(par, PM3PixelSize,
					   PM3PixelSize_GLOBAL_8BIT);
#ifdef __BIG_ENDIAN
			rm |= 3 << 15;
#endif
			break;
		case 16:
			PM3_WRITE_REG(par, PM3PixelSize,
					   PM3PixelSize_GLOBAL_16BIT);
#ifdef __BIG_ENDIAN
			rm |= 2 << 15;
#endif
			break;
		case 32:
			PM3_WRITE_REG(par, PM3PixelSize,
					   PM3PixelSize_GLOBAL_32BIT);
			break;
		default:
			DPRINTK("Unsupported depth %d\n",
				info->var.bits_per_pixel);
			break;
		}
		PM3_WRITE_REG(par, PM3RasterizerMode, rm);
	}

	PM3_WAIT(par, 20);
	/* write path: all planes enabled, buffer 0, virtual width */
	PM3_WRITE_REG(par, PM3FBSoftwareWriteMask, 0xffffffff);
	PM3_WRITE_REG(par, PM3FBHardwareWriteMask, 0xffffffff);
	PM3_WRITE_REG(par, PM3FBWriteMode,
			   PM3FBWriteMode_WriteEnable |
			   PM3FBWriteMode_OpaqueSpan |
			   PM3FBWriteMode_Enable0);
	PM3_WRITE_REG(par, PM3FBWriteBufferAddr0, 0x0);
	PM3_WRITE_REG(par, PM3FBWriteBufferOffset0, 0x0);
	PM3_WRITE_REG(par, PM3FBWriteBufferWidth0,
			   PM3FBWriteBufferWidth_Width(width));

	PM3_WRITE_REG(par, PM3SizeOfFramebuffer, 0x0);
	{
		/* size in lines of FB, clamped to the 12-bit register field */
		unsigned long sofb = info->screen_size /
			info->fix.line_length;

		if (sofb > 4095)
			PM3_WRITE_REG(par, PM3SizeOfFramebuffer, 4095);
		else
			PM3_WRITE_REG(par, PM3SizeOfFramebuffer, sofb);

		switch (info->var.bits_per_pixel) {
		case 8:
			PM3_WRITE_REG(par, PM3DitherMode,
					   (1 << 10) | (2 << 3));
			break;
		case 16:
			PM3_WRITE_REG(par, PM3DitherMode,
					   (1 << 10) | (1 << 3));
			break;
		case 32:
			PM3_WRITE_REG(par, PM3DitherMode,
					   (1 << 10) | (0 << 3));
			break;
		default:
			DPRINTK("Unsupported depth %d\n",
				info->var.bits_per_pixel);
			break;
		}
	}

	/* one full-screen-width horizontal span per step */
	PM3_WRITE_REG(par, PM3dXDom, 0x0);
	PM3_WRITE_REG(par, PM3dXSub, 0x0);
	PM3_WRITE_REG(par, PM3dY, 1 << 16);
	PM3_WRITE_REG(par, PM3StartXDom, 0x0);
	PM3_WRITE_REG(par, PM3StartXSub, 0x0);
	PM3_WRITE_REG(par, PM3StartY, 0x0);
	PM3_WRITE_REG(par, PM3Count, 0x0);

	/* Disable LocalBuffer. better safe than sorry */
	PM3_WRITE_REG(par, PM3LBDestReadMode, 0x0);
	PM3_WRITE_REG(par, PM3LBDestReadEnables, 0x0);
	PM3_WRITE_REG(par, PM3LBSourceReadMode, 0x0);
	PM3_WRITE_REG(par, PM3LBWriteMode, 0x0);

	pm3fb_sync(info);
}
/*
 * Accelerated solid/XOR rectangle fill.  Falls back to cfb_fillrect()
 * when acceleration is disabled; clips the request to the virtual
 * resolution, replicates the fill color across the 32-bit span word for
 * depths below 32 bpp, then issues a constant-source 2D render.
 */
static void pm3fb_fillrect(struct fb_info *info,
				const struct fb_fillrect *region)
{
	struct pm3_par *par = info->par;
	struct fb_fillrect modded;
	int vxres, vyres;
	int rop;
	/* truecolor indexes the pseudo-palette; otherwise raw color */
	u32 color = (info->fix.visual == FB_VISUAL_TRUECOLOR) ?
		((u32 *)info->pseudo_palette)[region->color] : region->color;

	if (info->state != FBINFO_STATE_RUNNING)
		return;
	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_fillrect(info, region);
		return;
	}
	if (region->rop == ROP_COPY )
		rop = PM3Config2D_ForegroundROP(0x3); /* GXcopy */
	else
		rop = PM3Config2D_ForegroundROP(0x6) | /* GXxor */
			PM3Config2D_FBDestReadEnable;	/* XOR must read dest */

	vxres = info->var.xres_virtual;
	vyres = info->var.yres_virtual;

	/* clip the rectangle against the virtual screen */
	memcpy(&modded, region, sizeof(struct fb_fillrect));

	if (!modded.width || !modded.height ||
		modded.dx >= vxres || modded.dy >= vyres)
		return;

	if (modded.dx + modded.width > vxres)
		modded.width = vxres - modded.dx;
	if (modded.dy + modded.height > vyres)
		modded.height = vyres - modded.dy;

	/* replicate color to fill a 32-bit engine word */
	if (info->var.bits_per_pixel == 8)
		color |= color << 8;
	if (info->var.bits_per_pixel <= 16)
		color |= color << 16;

	PM3_WAIT(par, 4);
	/* ROP Ox3 is GXcopy */
	PM3_WRITE_REG(par, PM3Config2D,
			PM3Config2D_UseConstantSource |
			PM3Config2D_ForegroundROPEnable |
			rop |
			PM3Config2D_FBWriteEnable);

	PM3_WRITE_REG(par, PM3ForegroundColor, color);

	PM3_WRITE_REG(par, PM3RectanglePosition,
			PM3RectanglePosition_XOffset(modded.dx) |
			PM3RectanglePosition_YOffset(modded.dy));

	PM3_WRITE_REG(par, PM3Render2D,
			PM3Render2D_XPositive |
			PM3Render2D_YPositive |
			PM3Render2D_Operation_Normal |
			PM3Render2D_SpanOperation |
			PM3Render2D_Width(modded.width) |
			PM3Render2D_Height(modded.height));
}
/*
 * Accelerated screen-to-screen copy.  Falls back to cfb_copyarea() when
 * acceleration is disabled.  The destination X is aligned down to a
 * 32-pixel boundary for the engine and the overshoot is trimmed by the
 * user scissor; render direction follows the copy direction so
 * overlapping areas are handled correctly.
 */
static void pm3fb_copyarea(struct fb_info *info,
				const struct fb_copyarea *area)
{
	struct pm3_par *par = info->par;
	struct fb_copyarea modded;
	u32 vxres, vyres;
	int x_align, o_x, o_y;

	if (info->state != FBINFO_STATE_RUNNING)
		return;
	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_copyarea(info, area);
		return;
	}

	/* clip both source and destination against the virtual screen */
	memcpy(&modded, area, sizeof(struct fb_copyarea));

	vxres = info->var.xres_virtual;
	vyres = info->var.yres_virtual;

	if (!modded.width || !modded.height ||
		modded.sx >= vxres || modded.sy >= vyres ||
		modded.dx >= vxres || modded.dy >= vyres)
		return;

	if (modded.sx + modded.width > vxres)
		modded.width = vxres - modded.sx;
	if (modded.dx + modded.width > vxres)
		modded.width = vxres - modded.dx;
	if (modded.sy + modded.height > vyres)
		modded.height = vyres - modded.sy;
	if (modded.dy + modded.height > vyres)
		modded.height = vyres - modded.dy;

	/* signed source-to-destination offsets */
	o_x = modded.sx - modded.dx;	/*(sx > dx ) ? (sx - dx) : (dx - sx); */
	o_y = modded.sy - modded.dy;	/*(sy > dy ) ? (sy - dy) : (dy - sy); */

	x_align = (modded.sx & 0x1f);	/* source X misalignment within 32 px */

	PM3_WAIT(par, 6);

	PM3_WRITE_REG(par, PM3Config2D,
			PM3Config2D_UserScissorEnable |
			PM3Config2D_ForegroundROPEnable |
			PM3Config2D_Blocking |
			PM3Config2D_ForegroundROP(0x3) | /* Ox3 is GXcopy */
			PM3Config2D_FBWriteEnable);

	/* scissor clips away the x_align overshoot below */
	PM3_WRITE_REG(par, PM3ScissorMinXY,
			((modded.dy & 0x0fff) << 16) | (modded.dx & 0x0fff));
	PM3_WRITE_REG(par, PM3ScissorMaxXY,
			(((modded.dy + modded.height) & 0x0fff) << 16) |
			((modded.dx + modded.width) & 0x0fff));

	PM3_WRITE_REG(par, PM3FBSourceReadBufferOffset,
			PM3FBSourceReadBufferOffset_XOffset(o_x) |
			PM3FBSourceReadBufferOffset_YOffset(o_y));

	PM3_WRITE_REG(par, PM3RectanglePosition,
			PM3RectanglePosition_XOffset(modded.dx - x_align) |
			PM3RectanglePosition_YOffset(modded.dy));

	PM3_WRITE_REG(par, PM3Render2D,
			((modded.sx > modded.dx) ? PM3Render2D_XPositive : 0) |
			((modded.sy > modded.dy) ? PM3Render2D_YPositive : 0) |
			PM3Render2D_Operation_Normal |
			PM3Render2D_SpanOperation |
			PM3Render2D_FBSourceReadEnable |
			PM3Render2D_Width(modded.width + x_align) |
			PM3Render2D_Height(modded.height));
}
/*
 * Accelerated 1-bit (mono) image blit: configures an opaque
 * constant-source render synchronized on the bitmask, then streams the
 * bitmap into PM3BitMaskPattern word by word, chunked so no burst
 * exceeds the input FIFO depth.  Color images and the
 * acceleration-disabled case fall back to cfb_imageblit().
 */
static void pm3fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	struct pm3_par *par = info->par;
	u32 height = image->height;
	u32 fgx, bgx;
	const u32 *src = (const u32 *)image->data;

	if (info->state != FBINFO_STATE_RUNNING)
		return;
	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_imageblit(info, image);
		return;
	}
	/* resolve fg/bg colors per visual type */
	switch (info->fix.visual) {
	case FB_VISUAL_PSEUDOCOLOR:
		fgx = image->fg_color;
		bgx = image->bg_color;
		break;
	case FB_VISUAL_TRUECOLOR:
	default:
		fgx = par->palette[image->fg_color];
		bgx = par->palette[image->bg_color];
		break;
	}
	if (image->depth != 1) {
		/* only monochrome expansion is accelerated */
		cfb_imageblit(info, image);
		return;
	}

	/* replicate colors to fill a 32-bit engine word */
	if (info->var.bits_per_pixel == 8) {
		fgx |= fgx << 8;
		bgx |= bgx << 8;
	}
	if (info->var.bits_per_pixel <= 16) {
		fgx |= fgx << 16;
		bgx |= bgx << 16;
	}

	PM3_WAIT(par, 7);

	PM3_WRITE_REG(par, PM3ForegroundColor, fgx);
	PM3_WRITE_REG(par, PM3BackgroundColor, bgx);

	/* ROP Ox3 is GXcopy */
	PM3_WRITE_REG(par, PM3Config2D,
			PM3Config2D_UserScissorEnable |
			PM3Config2D_UseConstantSource |
			PM3Config2D_ForegroundROPEnable |
			PM3Config2D_ForegroundROP(0x3) |
			PM3Config2D_OpaqueSpan |
			PM3Config2D_FBWriteEnable);
	PM3_WRITE_REG(par, PM3ScissorMinXY,
			((image->dy & 0x0fff) << 16) | (image->dx & 0x0fff));
	PM3_WRITE_REG(par, PM3ScissorMaxXY,
			(((image->dy + image->height) & 0x0fff) << 16) |
			((image->dx + image->width) & 0x0fff));
	PM3_WRITE_REG(par, PM3RectanglePosition,
			PM3RectanglePosition_XOffset(image->dx) |
			PM3RectanglePosition_YOffset(image->dy));
	PM3_WRITE_REG(par, PM3Render2D,
			PM3Render2D_XPositive |
			PM3Render2D_YPositive |
			PM3Render2D_Operation_SyncOnBitMask |
			PM3Render2D_SpanOperation |
			PM3Render2D_Width(image->width) |
			PM3Render2D_Height(image->height));

	while (height--) {
		/* words per padded scanline (pixmap rows are scan_align'ed) */
		int width = ((image->width + 7) >> 3)
			+ info->pixmap.scan_align - 1;
		width >>= 2;

		/* feed in FIFO-sized chunks to avoid overflowing it */
		while (width >= PM3_FIFO_SIZE) {
			int i = PM3_FIFO_SIZE - 1;

			PM3_WAIT(par, PM3_FIFO_SIZE);
			while (i--) {
				PM3_WRITE_REG(par, PM3BitMaskPattern, *src);
				src++;
			}
			width -= PM3_FIFO_SIZE - 1;
		}

		PM3_WAIT(par, width + 1);
		while (width--) {
			PM3_WRITE_REG(par, PM3BitMaskPattern, *src);
			src++;
		}
	}
}
/* end of acceleration functions */
/*
* Hardware Cursor support.
*/
/*
 * Spread the 4 bits of a nibble into the even bit positions of a byte,
 * in reversed order (source bit k lands at result bit 6-2k, so index 1
 * -> 0x40, index 8 -> 0x01).  The caller ORs the mask's expansion,
 * shifted left by one, into the odd positions to build the RAMDAC's
 * 2-bit-per-pixel cursor pattern.
 */
static const u8 cursor_bits_lookup[16] = {
	0x00, 0x40, 0x10, 0x50, 0x04, 0x44, 0x14, 0x54,
	0x01, 0x41, 0x11, 0x51, 0x05, 0x45, 0x15, 0x55
};
/*
 * Hardware cursor hook (fb_cursor).  Supports up to 64x64, depth-1
 * cursors; returns -EINVAL otherwise so the core falls back to
 * soft_cursor().  Updates only the pieces selected by cursor->set:
 * position, hot spot, the two cursor colors, and/or the 2bpp pattern
 * RAM built from the image XOR/AND'ed with the mask.
 */
static int pm3fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
	struct pm3_par *par = info->par;
	u8 mode;

	if (!hwcursor)
		return -EINVAL;	/* just to force soft_cursor() call */

	/* Too large of a cursor or wrong bpp :-( */
	if (cursor->image.width > 64 ||
		cursor->image.height > 64 ||
		cursor->image.depth > 1)
		return -EINVAL;

	mode = PM3RD_CursorMode_TYPE_X;
	if (cursor->enable)
		mode |= PM3RD_CursorMode_CURSOR_ENABLE;

	PM3_WRITE_DAC_REG(par, PM3RD_CursorMode, mode);

	/*
	 * If the cursor is not be changed this means either we want the
	 * current cursor state (if enable is set) or we want to query what
	 * we can do with the cursor (if enable is not set)
	 */
	if (!cursor->set)
		return 0;

	if (cursor->set & FB_CUR_SETPOS) {
		/* convert to panning-relative screen coordinates */
		int x = cursor->image.dx - info->var.xoffset;
		int y = cursor->image.dy - info->var.yoffset;

		PM3_WRITE_DAC_REG(par, PM3RD_CursorXLow, x & 0xff);
		PM3_WRITE_DAC_REG(par, PM3RD_CursorXHigh, (x >> 8) & 0xf);
		PM3_WRITE_DAC_REG(par, PM3RD_CursorYLow, y & 0xff);
		PM3_WRITE_DAC_REG(par, PM3RD_CursorYHigh, (y >> 8) & 0xf);
	}

	if (cursor->set & FB_CUR_SETHOT) {
		PM3_WRITE_DAC_REG(par, PM3RD_CursorHotSpotX,
				  cursor->hot.x & 0x3f);
		PM3_WRITE_DAC_REG(par, PM3RD_CursorHotSpotY,
				  cursor->hot.y & 0x3f);
	}

	if (cursor->set & FB_CUR_SETCMAP) {
		u32 fg_idx = cursor->image.fg_color;
		u32 bg_idx = cursor->image.bg_color;
		struct fb_cmap cmap = info->cmap;

		/* the X11 driver says one should use these color registers */
		PM3_WRITE_DAC_REG(par, PM3RD_CursorPalette(39),
				  cmap.red[fg_idx] >> 8 );
		PM3_WRITE_DAC_REG(par, PM3RD_CursorPalette(40),
				  cmap.green[fg_idx] >> 8 );
		PM3_WRITE_DAC_REG(par, PM3RD_CursorPalette(41),
				  cmap.blue[fg_idx] >> 8 );

		PM3_WRITE_DAC_REG(par, PM3RD_CursorPalette(42),
				  cmap.red[bg_idx] >> 8 );
		PM3_WRITE_DAC_REG(par, PM3RD_CursorPalette(43),
				  cmap.green[bg_idx] >> 8 );
		PM3_WRITE_DAC_REG(par, PM3RD_CursorPalette(44),
				  cmap.blue[bg_idx] >> 8 );
	}

	if (cursor->set & (FB_CUR_SETSHAPE | FB_CUR_SETIMAGE)) {
		u8 *bitmap = (u8 *)cursor->image.data;
		u8 *mask = (u8 *)cursor->mask;
		int i;
		int pos = PM3RD_CursorPattern(0);

		for (i = 0; i < cursor->image.height; i++) {
			int j = (cursor->image.width + 7) >> 3;
			int k = 8 - j;	/* pad row out to 64 pixels */

			for (; j > 0; j--) {
				u8 data = *bitmap ^ *mask;

				if (cursor->rop == ROP_COPY)
					data = *mask & *bitmap;
				/* Upper 4 bits of bitmap data */
				PM3_WRITE_DAC_REG(par, pos++,
					cursor_bits_lookup[data >> 4] |
					(cursor_bits_lookup[*mask >> 4] << 1));
				/* Lower 4 bits of bitmap */
				PM3_WRITE_DAC_REG(par, pos++,
					cursor_bits_lookup[data & 0xf] |
					(cursor_bits_lookup[*mask & 0xf] << 1));
				bitmap++;
				mask++;
			}
			for (; k > 0; k--) {
				PM3_WRITE_DAC_REG(par, pos++, 0);
				PM3_WRITE_DAC_REG(par, pos++, 0);
			}
		}
		/* clear the remainder of the 1 KB pattern RAM */
		while (pos < PM3RD_CursorPattern(1024))
			PM3_WRITE_DAC_REG(par, pos++, 0);
	}
	return 0;
}
/*
 * Program the complete video mode from info->var into the chip: CRTC
 * timings (converted from pixels to the chip's internal units), byte
 * apertures for the current depth/endianness, video control, the dot
 * clock PLL, and finally the RAMDAC sync polarity, pixel size and color
 * format.  Must be called with par->base and par->video already set up
 * (see pm3fb_set_par).
 */
static void pm3fb_write_mode(struct fb_info *info)
{
	struct pm3_par *par = info->par;
	char tempsync = 0x00;
	char tempmisc = 0x00;
	/* derive absolute timing positions from the fbdev margin fields */
	const u32 hsstart = info->var.right_margin;
	const u32 hsend = hsstart + info->var.hsync_len;
	const u32 hbend = hsend + info->var.left_margin;
	const u32 xres = (info->var.xres + 31) & ~31;
	const u32 htotal = xres + hbend;
	const u32 vsstart = info->var.lower_margin;
	const u32 vsend = vsstart + info->var.vsync_len;
	const u32 vbend = vsend + info->var.upper_margin;
	const u32 vtotal = info->var.yres + vbend;
	const u32 width = (info->var.xres_virtual + 7) & ~7;
	const unsigned bpp = info->var.bits_per_pixel;

	PM3_WAIT(par, 20);
	PM3_WRITE_REG(par, PM3MemBypassWriteMask, 0xffffffff);
	PM3_WRITE_REG(par, PM3Aperture0, 0x00000000);
	PM3_WRITE_REG(par, PM3Aperture1, 0x00000000);
	PM3_WRITE_REG(par, PM3FIFODis, 0x00000007);

	/* horizontal timings are in 128-bit units, hence the bpp shift */
	PM3_WRITE_REG(par, PM3HTotal,
		      pm3fb_shift_bpp(bpp, htotal - 1));
	PM3_WRITE_REG(par, PM3HsEnd,
		      pm3fb_shift_bpp(bpp, hsend));
	PM3_WRITE_REG(par, PM3HsStart,
		      pm3fb_shift_bpp(bpp, hsstart));
	PM3_WRITE_REG(par, PM3HbEnd,
		      pm3fb_shift_bpp(bpp, hbend));
	PM3_WRITE_REG(par, PM3HgEnd,
		      pm3fb_shift_bpp(bpp, hbend));
	PM3_WRITE_REG(par, PM3ScreenStride,
		      pm3fb_shift_bpp(bpp, width));
	PM3_WRITE_REG(par, PM3VTotal, vtotal - 1);
	PM3_WRITE_REG(par, PM3VsEnd, vsend - 1);
	PM3_WRITE_REG(par, PM3VsStart, vsstart - 1);
	PM3_WRITE_REG(par, PM3VbEnd, vbend);

	/* byte apertures: pixel size plus byte swapping on big-endian */
	switch (bpp) {
	case 8:
		PM3_WRITE_REG(par, PM3ByAperture1Mode,
			      PM3ByApertureMode_PIXELSIZE_8BIT);
		PM3_WRITE_REG(par, PM3ByAperture2Mode,
			      PM3ByApertureMode_PIXELSIZE_8BIT);
		break;

	case 16:
#ifndef __BIG_ENDIAN
		PM3_WRITE_REG(par, PM3ByAperture1Mode,
			      PM3ByApertureMode_PIXELSIZE_16BIT);
		PM3_WRITE_REG(par, PM3ByAperture2Mode,
			      PM3ByApertureMode_PIXELSIZE_16BIT);
#else
		PM3_WRITE_REG(par, PM3ByAperture1Mode,
			      PM3ByApertureMode_PIXELSIZE_16BIT |
			      PM3ByApertureMode_BYTESWAP_BADC);
		PM3_WRITE_REG(par, PM3ByAperture2Mode,
			      PM3ByApertureMode_PIXELSIZE_16BIT |
			      PM3ByApertureMode_BYTESWAP_BADC);
#endif /* ! __BIG_ENDIAN */
		break;

	case 32:
#ifndef __BIG_ENDIAN
		PM3_WRITE_REG(par, PM3ByAperture1Mode,
			      PM3ByApertureMode_PIXELSIZE_32BIT);
		PM3_WRITE_REG(par, PM3ByAperture2Mode,
			      PM3ByApertureMode_PIXELSIZE_32BIT);
#else
		PM3_WRITE_REG(par, PM3ByAperture1Mode,
			      PM3ByApertureMode_PIXELSIZE_32BIT |
			      PM3ByApertureMode_BYTESWAP_DCBA);
		PM3_WRITE_REG(par, PM3ByAperture2Mode,
			      PM3ByApertureMode_PIXELSIZE_32BIT |
			      PM3ByApertureMode_BYTESWAP_DCBA);
#endif /* ! __BIG_ENDIAN */
		break;

	default:
		DPRINTK("Unsupported depth %d\n", bpp);
		break;
	}

	/*
	 * Oxygen VX1 - it appears that setting PM3VideoControl and
	 * then PM3RD_SyncControl to the same SYNC settings undoes
	 * any net change - they seem to xor together.  Only set the
	 * sync options in PM3RD_SyncControl.  --rmk
	 */
	{
		unsigned int video = par->video;

		video &= ~(PM3VideoControl_HSYNC_MASK |
			   PM3VideoControl_VSYNC_MASK);
		video |= PM3VideoControl_HSYNC_ACTIVE_HIGH |
			 PM3VideoControl_VSYNC_ACTIVE_HIGH;
		PM3_WRITE_REG(par, PM3VideoControl, video);
	}
	PM3_WRITE_REG(par, PM3VClkCtl,
		      (PM3_READ_REG(par, PM3VClkCtl) & 0xFFFFFFFC));
	PM3_WRITE_REG(par, PM3ScreenBase, par->base);
	PM3_WRITE_REG(par, PM3ChipConfig,
		      (PM3_READ_REG(par, PM3ChipConfig) & 0xFFFFFFFD));
	wmb();
	{
		unsigned char uninitialized_var(m);	/* ClkPreScale */
		unsigned char uninitialized_var(n);	/* ClkFeedBackScale */
		unsigned char uninitialized_var(p);	/* ClkPostScale */
		unsigned long pixclock = PICOS2KHZ(info->var.pixclock);

		/* note: the (void) cast is vestigial, the function returns void */
		(void)pm3fb_calculate_clock(pixclock, &m, &n, &p);

		DPRINTK("Pixclock: %ld, Pre: %d, Feedback: %d, Post: %d\n",
			pixclock, (int) m, (int) n, (int) p);

		PM3_WRITE_DAC_REG(par, PM3RD_DClk0PreScale, m);
		PM3_WRITE_DAC_REG(par, PM3RD_DClk0FeedbackScale, n);
		PM3_WRITE_DAC_REG(par, PM3RD_DClk0PostScale, p);
	}
	/*
	   PM3_WRITE_DAC_REG(par, PM3RD_IndexControl, 0x00);
	 */
	/*
	   PM3_SLOW_WRITE_REG(par, PM3RD_IndexControl, 0x00);
	 */

	/* program the true sync polarity into the RAMDAC only (see above) */
	if ((par->video & PM3VideoControl_HSYNC_MASK) ==
	    PM3VideoControl_HSYNC_ACTIVE_HIGH)
		tempsync |= PM3RD_SyncControl_HSYNC_ACTIVE_HIGH;
	if ((par->video & PM3VideoControl_VSYNC_MASK) ==
	    PM3VideoControl_VSYNC_ACTIVE_HIGH)
		tempsync |= PM3RD_SyncControl_VSYNC_ACTIVE_HIGH;

	PM3_WRITE_DAC_REG(par, PM3RD_SyncControl, tempsync);
	DPRINTK("PM3RD_SyncControl: %d\n", tempsync);

	PM3_WRITE_DAC_REG(par, PM3RD_DACControl, 0x00);

	/* RAMDAC pixel size and color format per effective depth */
	switch (pm3fb_depth(&info->var)) {
	case 8:
		PM3_WRITE_DAC_REG(par, PM3RD_PixelSize,
				  PM3RD_PixelSize_8_BIT_PIXELS);
		PM3_WRITE_DAC_REG(par, PM3RD_ColorFormat,
				  PM3RD_ColorFormat_CI8_COLOR |
				  PM3RD_ColorFormat_COLOR_ORDER_BLUE_LOW);
		tempmisc |= PM3RD_MiscControl_HIGHCOLOR_RES_ENABLE;
		break;
	case 12:
		PM3_WRITE_DAC_REG(par, PM3RD_PixelSize,
				  PM3RD_PixelSize_16_BIT_PIXELS);
		PM3_WRITE_DAC_REG(par, PM3RD_ColorFormat,
				  PM3RD_ColorFormat_4444_COLOR |
				  PM3RD_ColorFormat_COLOR_ORDER_BLUE_LOW |
				  PM3RD_ColorFormat_LINEAR_COLOR_EXT_ENABLE);
		tempmisc |= PM3RD_MiscControl_DIRECTCOLOR_ENABLE |
			PM3RD_MiscControl_HIGHCOLOR_RES_ENABLE;
		break;
	case 15:
		PM3_WRITE_DAC_REG(par, PM3RD_PixelSize,
				  PM3RD_PixelSize_16_BIT_PIXELS);
		PM3_WRITE_DAC_REG(par, PM3RD_ColorFormat,
				  PM3RD_ColorFormat_5551_FRONT_COLOR |
				  PM3RD_ColorFormat_COLOR_ORDER_BLUE_LOW |
				  PM3RD_ColorFormat_LINEAR_COLOR_EXT_ENABLE);
		tempmisc |= PM3RD_MiscControl_DIRECTCOLOR_ENABLE |
			PM3RD_MiscControl_HIGHCOLOR_RES_ENABLE;
		break;
	case 16:
		PM3_WRITE_DAC_REG(par, PM3RD_PixelSize,
				  PM3RD_PixelSize_16_BIT_PIXELS);
		PM3_WRITE_DAC_REG(par, PM3RD_ColorFormat,
				  PM3RD_ColorFormat_565_FRONT_COLOR |
				  PM3RD_ColorFormat_COLOR_ORDER_BLUE_LOW |
				  PM3RD_ColorFormat_LINEAR_COLOR_EXT_ENABLE);
		tempmisc |= PM3RD_MiscControl_DIRECTCOLOR_ENABLE |
			PM3RD_MiscControl_HIGHCOLOR_RES_ENABLE;
		break;
	case 32:
		PM3_WRITE_DAC_REG(par, PM3RD_PixelSize,
				  PM3RD_PixelSize_32_BIT_PIXELS);
		PM3_WRITE_DAC_REG(par, PM3RD_ColorFormat,
				  PM3RD_ColorFormat_8888_COLOR |
				  PM3RD_ColorFormat_COLOR_ORDER_BLUE_LOW);
		tempmisc |= PM3RD_MiscControl_DIRECTCOLOR_ENABLE |
			PM3RD_MiscControl_HIGHCOLOR_RES_ENABLE;
		break;
	}
	PM3_WRITE_DAC_REG(par, PM3RD_MiscControl, tempmisc);
}
/*
* hardware independent functions
*/
/*
 * fb_check_var hook: validate and round @var against hardware limits
 * without touching the hardware.  Fills in default RGB(A) bitfields for
 * 8/16/32 bpp, enforces BGRA field layout, rounds xres up to a multiple
 * of 32, and rejects modes the chip or the installed memory cannot do.
 * Returns 0 on success, -EINVAL otherwise.
 */
static int pm3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	u32 lpitch;
	unsigned bpp = var->red.length + var->green.length
			+ var->blue.length + var->transp.length;

	if (bpp != var->bits_per_pixel) {
		/* set predefined mode for bits_per_pixel settings */

		switch (var->bits_per_pixel) {
		case 8:
			var->red.length = 8;
			var->green.length = 8;
			var->blue.length = 8;
			var->red.offset = 0;
			var->green.offset = 0;
			var->blue.offset = 0;
			var->transp.offset = 0;
			var->transp.length = 0;
			break;
		case 16:
			var->red.length = 5;
			var->blue.length = 5;
			var->green.length = 6;
			var->transp.length = 0;
			break;
		case 32:
			var->red.length = 8;
			var->green.length = 8;
			var->blue.length = 8;
			var->transp.length = 8;
			break;
		default:
			DPRINTK("depth not supported: %u\n",
				var->bits_per_pixel);
			return -EINVAL;
		}
	}
	/* it is assumed BGRA order */
	if (var->bits_per_pixel > 8 ) {
		var->blue.offset = 0;
		var->green.offset = var->blue.length;
		var->red.offset = var->green.offset + var->green.length;
		var->transp.offset = var->red.offset + var->red.length;
	}
	var->height = -1;	/* physical dimensions unknown */
	var->width = -1;

	if (var->xres != var->xres_virtual) {
		DPRINTK("virtual x resolution != "
			"physical x resolution not supported\n");
		return -EINVAL;
	}

	if (var->yres > var->yres_virtual) {
		DPRINTK("virtual y resolution < "
			"physical y resolution not possible\n");
		return -EINVAL;
	}

	if (var->xoffset) {
		DPRINTK("xoffset not supported\n");
		return -EINVAL;
	}

	if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) {
		DPRINTK("interlace not supported\n");
		return -EINVAL;
	}

	var->xres = (var->xres + 31) & ~31; /* could sometimes be 8 */
	lpitch = var->xres * ((var->bits_per_pixel + 7) >> 3);

	if (var->xres < 200 || var->xres > 2048) {
		DPRINTK("width not supported: %u\n", var->xres);
		return -EINVAL;
	}

	if (var->yres < 200 || var->yres > 4095) {
		DPRINTK("height not supported: %u\n", var->yres);
		return -EINVAL;
	}

	if (lpitch * var->yres_virtual > info->fix.smem_len) {
		DPRINTK("no memory for screen (%ux%ux%u)\n",
			var->xres, var->yres_virtual, var->bits_per_pixel);
		return -EINVAL;
	}

	if (PICOS2KHZ(var->pixclock) > PM3_MAX_PIXCLOCK) {
		DPRINTK("pixclock too high (%ldKHz)\n",
			PICOS2KHZ(var->pixclock));
		return -EINVAL;
	}

	var->accel_flags = 0;	/* Can't mmap if this is on */

	DPRINTK("Checking graphics mode at %dx%d depth %d\n",
		var->xres, var->yres, var->bits_per_pixel);
	return 0;
}
/*
 * fb_set_par hook: derive par->base and par->video from the validated
 * info->var, update the fix fields (visual, line_length), reset the
 * colormap and cursor, re-initialize the accelerator, then program the
 * mode into the hardware via pm3fb_write_mode().
 */
static int pm3fb_set_par(struct fb_info *info)
{
	struct pm3_par *par = info->par;
	const u32 xres = (info->var.xres + 31) & ~31;
	const unsigned bpp = info->var.bits_per_pixel;

	/* screen base in 128-bit units, accounting for panning offsets */
	par->base = pm3fb_shift_bpp(bpp, (info->var.yoffset * xres)
					+ info->var.xoffset);
	par->video = 0;

	if (info->var.sync & FB_SYNC_HOR_HIGH_ACT)
		par->video |= PM3VideoControl_HSYNC_ACTIVE_HIGH;
	else
		par->video |= PM3VideoControl_HSYNC_ACTIVE_LOW;

	if (info->var.sync & FB_SYNC_VERT_HIGH_ACT)
		par->video |= PM3VideoControl_VSYNC_ACTIVE_HIGH;
	else
		par->video |= PM3VideoControl_VSYNC_ACTIVE_LOW;

	if ((info->var.vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE)
		par->video |= PM3VideoControl_LINE_DOUBLE_ON;

	if ((info->var.activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW)
		par->video |= PM3VideoControl_ENABLE;
	else
		DPRINTK("PM3Video disabled\n");

	switch (bpp) {
	case 8:
		par->video |= PM3VideoControl_PIXELSIZE_8BIT;
		break;
	case 16:
		par->video |= PM3VideoControl_PIXELSIZE_16BIT;
		break;
	case 32:
		par->video |= PM3VideoControl_PIXELSIZE_32BIT;
		break;
	default:
		DPRINTK("Unsupported depth\n");
		break;
	}

	info->fix.visual =
		(bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
	/* bytes per line = ceil(xres_virtual / 8) * bpp */
	info->fix.line_length = ((info->var.xres_virtual + 7) >> 3) * bpp;

/*	pm3fb_clear_memory(info, 0);*/
	pm3fb_clear_colormap(par, 0, 0, 0);
	PM3_WRITE_DAC_REG(par, PM3RD_CursorMode, 0);
	pm3fb_init_engine(info);
	pm3fb_write_mode(info);
	return 0;
}
/*
 * fb_setcolreg hook: set one color register.  For truecolor/directcolor
 * visuals the first 16 entries fill the software pseudo-palette used by
 * the drawing ops; for pseudocolor the hardware RAMDAC palette is
 * programmed.  Returns 0 on success, -EINVAL for out-of-range regno.
 */
static int pm3fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			   unsigned blue, unsigned transp,
			   struct fb_info *info)
{
	struct pm3_par *par = info->par;

	if (regno >= 256)  /* no. of hw registers */
		return -EINVAL;

	/* grayscale works only partially under directcolor */
	/* grayscale = 0.30*R + 0.59*G + 0.11*B */
	if (info->var.grayscale)
		red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;

	/* Directcolor:
	 *   var->{color}.offset contains start of bitfield
	 *   var->{color}.length contains length of bitfield
	 *   {hardwarespecific} contains width of DAC
	 *   pseudo_palette[X] is programmed to (X << red.offset) |
	 *				      (X << green.offset) |
	 *				      (X << blue.offset)
	 *   RAMDAC[X] is programmed to (red, green, blue)
	 *   color depth = SUM(var->{color}.length)
	 *
	 * Pseudocolor:
	 *	var->{color}.offset is 0
	 *	var->{color}.length contains width of DAC or the number
	 *			of unique colors available (color depth)
	 *	pseudo_palette is not used
	 *	RAMDAC[X] is programmed to (red, green, blue)
	 *	color depth = var->{color}.length
	 */

	/*
	 * This is the point where the color is converted to something that
	 * is acceptable by the hardware.
	 */
#define CNVT_TOHW(val, width) ((((val) << (width)) + 0x7FFF - (val)) >> 16)
	red = CNVT_TOHW(red, info->var.red.length);
	green = CNVT_TOHW(green, info->var.green.length);
	blue = CNVT_TOHW(blue, info->var.blue.length);
	transp = CNVT_TOHW(transp, info->var.transp.length);
#undef CNVT_TOHW

	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
		u32 v;

		if (regno >= 16)
			return -EINVAL;	/* pseudo-palette has 16 entries */

		v = (red << info->var.red.offset) |
			(green << info->var.green.offset) |
			(blue << info->var.blue.offset) |
			(transp << info->var.transp.offset);

		switch (info->var.bits_per_pixel) {
		case 8:
			break;	/* 8 bpp truecolor: nothing to store */
		case 16:
		case 32:
			((u32 *)(info->pseudo_palette))[regno] = v;
			break;
		}
		return 0;
	} else if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR)
		pm3fb_set_color(par, regno, red, green, blue);

	return 0;
}
/*
 * fb_pan_display hook: recompute the screen base (in 128-bit units)
 * from the requested pan offsets and program it.  Always returns 0.
 */
static int pm3fb_pan_display(struct fb_var_screeninfo *var,
				 struct fb_info *info)
{
	struct pm3_par *par = info->par;
	const u32 xres = (info->var.xres + 31) & ~31;
	u32 pixels = var->yoffset * xres + var->xoffset;

	par->base = pm3fb_shift_bpp(info->var.bits_per_pixel, pixels);
	PM3_WAIT(par, 1);
	PM3_WRITE_REG(par, PM3ScreenBase, par->base);
	return 0;
}
/*
 * fb_blank hook: blank or unblank the display by gating video enable
 * and the sync/blank signals in PM3VideoControl.  Returns 0 on success,
 * 1 for an unknown blank mode (fbdev convention for "not handled").
 */
static int pm3fb_blank(int blank_mode, struct fb_info *info)
{
	struct pm3_par *par = info->par;
	u32 video = par->video;

	/*
	 * Oxygen VX1 - it appears that setting PM3VideoControl and
	 * then PM3RD_SyncControl to the same SYNC settings undoes
	 * any net change - they seem to xor together.  Only set the
	 * sync options in PM3RD_SyncControl.  --rmk
	 */
	video &= ~(PM3VideoControl_HSYNC_MASK |
		   PM3VideoControl_VSYNC_MASK);
	video |= PM3VideoControl_HSYNC_ACTIVE_HIGH |
		 PM3VideoControl_VSYNC_ACTIVE_HIGH;

	switch (blank_mode) {
	case FB_BLANK_UNBLANK:
		video |= PM3VideoControl_ENABLE;
		break;
	case FB_BLANK_NORMAL:
		video &= ~PM3VideoControl_ENABLE;
		break;
	case FB_BLANK_HSYNC_SUSPEND:
		/* drop hsync and blank, keep vsync (monitor standby) */
		video &= ~(PM3VideoControl_HSYNC_MASK |
			  PM3VideoControl_BLANK_ACTIVE_LOW);
		break;
	case FB_BLANK_VSYNC_SUSPEND:
		/* drop vsync and blank, keep hsync (monitor suspend) */
		video &= ~(PM3VideoControl_VSYNC_MASK |
			  PM3VideoControl_BLANK_ACTIVE_LOW);
		break;
	case FB_BLANK_POWERDOWN:
		/* drop both syncs and blank (monitor off) */
		video &= ~(PM3VideoControl_HSYNC_MASK |
			  PM3VideoControl_VSYNC_MASK |
			  PM3VideoControl_BLANK_ACTIVE_LOW);
		break;
	default:
		DPRINTK("Unsupported blanking %d\n", blank_mode);
		return 1;
	}

	PM3_WAIT(par, 1);
	PM3_WRITE_REG(par, PM3VideoControl, video);

	return 0;
}
/*
* Frame buffer operations
*/
/* Frame buffer operations vector registered with the fbdev core. */
static struct fb_ops pm3fb_ops = {
	.owner		= THIS_MODULE,
	.fb_check_var	= pm3fb_check_var,
	.fb_set_par	= pm3fb_set_par,
	.fb_setcolreg	= pm3fb_setcolreg,
	.fb_pan_display	= pm3fb_pan_display,
	.fb_fillrect	= pm3fb_fillrect,
	.fb_copyarea	= pm3fb_copyarea,
	.fb_imageblit	= pm3fb_imageblit,
	.fb_blank	= pm3fb_blank,
	.fb_sync	= pm3fb_sync,
	.fb_cursor	= pm3fb_cursor,
};
/* ------------------------------------------------------------------------- */
/*
* Initialization
*/
/* mmio register are already mapped when this function is called */
/* the pm3fb_fix.smem_start is also set */
/*
 * Detect installed video memory by writing distinct patterns at 1 MB
 * intervals and reading them back.  Pass 1 probes 0..32 MB; writes wrap
 * at the 16 MB boundary on smaller boards, which the read-back catches.
 * If all of pass 1 sticks, pass 2 probes 32..64 MB, also checking that
 * the mirror of each address 32 MB lower stayed zero (detects aliasing).
 * Maps/unmaps the aperture temporarily; restores PM3MemBypassWriteMask.
 * Returns the memory size in bytes, or 0 on mapping failure.
 */
static unsigned long pm3fb_size_memory(struct pm3_par *par)
{
	unsigned long	memsize = 0;
	unsigned long	tempBypass, i, temp1, temp2;
	unsigned char	__iomem *screen_mem;

	pm3fb_fix.smem_len = 64 * 1024l * 1024; /* request full aperture size */
	/* Linear frame buffer - request region and map it. */
	if (!request_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len,
				 "pm3fb smem")) {
		printk(KERN_WARNING "pm3fb: Can't reserve smem.\n");
		return 0;
	}
	screen_mem =
		ioremap_nocache(pm3fb_fix.smem_start, pm3fb_fix.smem_len);
	if (!screen_mem) {
		printk(KERN_WARNING "pm3fb: Can't ioremap smem area.\n");
		release_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len);
		return 0;
	}

	/* TODO: card-specific stuff, *before* accessing *any* FB memory */
	/* For Appian Jeronimo 2000 board second head */

	tempBypass = PM3_READ_REG(par, PM3MemBypassWriteMask);

	DPRINTK("PM3MemBypassWriteMask was: 0x%08lx\n", tempBypass);

	PM3_WAIT(par, 1);
	PM3_WRITE_REG(par, PM3MemBypassWriteMask, 0xFFFFFFFF);

	/* pm3 split up memory, replicates, and do a lot of
	 * nasty stuff IMHO ;-)
	 */
	for (i = 0; i < 32; i++) {
		fb_writel(i * 0x00345678,
			  (screen_mem + (i * 1048576)));
		mb();
		temp1 = fb_readl((screen_mem + (i * 1048576)));

		/* Let's check for wrapover, write will fail at 16MB boundary */
		if (temp1 == (i * 0x00345678))
			memsize = i;
		else
			break;
	}

	DPRINTK("First detect pass already got %ld MB\n", memsize + 1);

	if (memsize + 1 == i) {
		for (i = 0; i < 32; i++) {
			/* Clear first 32MB ; 0 is 0, no need to byteswap */
			writel(0x0000000, (screen_mem + (i * 1048576)));
		}
		wmb();

		for (i = 32; i < 64; i++) {
			fb_writel(i * 0x00345678,
				  (screen_mem + (i * 1048576)));
			mb();
			temp1 =
			    fb_readl((screen_mem + (i * 1048576)));
			temp2 =
			    fb_readl((screen_mem + ((i - 32) * 1048576)));
			/* different value, different RAM... */
			if ((temp1 == (i * 0x00345678)) && (temp2 == 0))
				memsize = i;
			else
				break;
		}
	}
	DPRINTK("Second detect pass got %ld MB\n", memsize + 1);

	PM3_WAIT(par, 1);
	PM3_WRITE_REG(par, PM3MemBypassWriteMask, tempBypass);

	iounmap(screen_mem);
	release_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len);
	memsize = 1048576 * (memsize + 1);

	DPRINTK("Returning 0x%08lx bytes\n", memsize);

	return memsize;
}
/*
 * PCI probe: enable the device, map the register BAR (BAR0) and the
 * framebuffer BAR (BAR1), size the video memory, set up acceleration,
 * pixmap and default mode, and register the framebuffer.  Errors unwind
 * through the goto-cleanup chain at the bottom.
 * NOTE(review): the MTRR added under CONFIG_MTRR is not removed on the
 * later error paths, and pci_disable_device() is never called — confirm
 * whether that matters for this driver's lifecycle.
 */
static int pm3fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
	struct fb_info *info;
	struct pm3_par *par;
	struct device *device = &dev->dev; /* for pci drivers */
	int err;
	int retval = -ENXIO;

	err = pci_enable_device(dev);
	if (err) {
		printk(KERN_WARNING "pm3fb: Can't enable PCI dev: %d\n", err);
		return err;
	}
	/*
	 * Dynamically allocate info and par
	 */
	info = framebuffer_alloc(sizeof(struct pm3_par), device);

	if (!info)
		return -ENOMEM;
	par = info->par;

	/*
	 * Here we set the screen_base to the virtual memory address
	 * for the framebuffer.
	 */
	pm3fb_fix.mmio_start = pci_resource_start(dev, 0);
	pm3fb_fix.mmio_len = PM3_REGS_SIZE;
#if defined(__BIG_ENDIAN)
	/* the chip exposes a byte-swapped register copy in the upper half */
	pm3fb_fix.mmio_start += PM3_REGS_SIZE;
	DPRINTK("Adjusting register base for big-endian.\n");
#endif

	/* Registers - request region and map it. */
	if (!request_mem_region(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len,
				 "pm3fb regbase")) {
		printk(KERN_WARNING "pm3fb: Can't reserve regbase.\n");
		goto err_exit_neither;
	}
	par->v_regs =
		ioremap_nocache(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len);
	if (!par->v_regs) {
		printk(KERN_WARNING "pm3fb: Can't remap %s register area.\n",
			pm3fb_fix.id);
		release_mem_region(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len);
		goto err_exit_neither;
	}

	/* Linear frame buffer - request region and map it. */
	pm3fb_fix.smem_start = pci_resource_start(dev, 1);
	pm3fb_fix.smem_len = pm3fb_size_memory(par);
	if (!pm3fb_fix.smem_len) {
		printk(KERN_WARNING "pm3fb: Can't find memory on board.\n");
		goto err_exit_mmio;
	}
	if (!request_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len,
				 "pm3fb smem")) {
		printk(KERN_WARNING "pm3fb: Can't reserve smem.\n");
		goto err_exit_mmio;
	}
	info->screen_base =
		ioremap_nocache(pm3fb_fix.smem_start, pm3fb_fix.smem_len);
	if (!info->screen_base) {
		printk(KERN_WARNING "pm3fb: Can't ioremap smem area.\n");
		release_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len);
		goto err_exit_mmio;
	}
	info->screen_size = pm3fb_fix.smem_len;

#ifdef CONFIG_MTRR
	if (!nomtrr)
		par->mtrr_handle = mtrr_add(pm3fb_fix.smem_start,
						pm3fb_fix.smem_len,
						MTRR_TYPE_WRCOMB, 1);
#endif
	info->fbops = &pm3fb_ops;

	par->video = PM3_READ_REG(par, PM3VideoControl);

	info->fix = pm3fb_fix;
	info->pseudo_palette = par->palette;
	info->flags = FBINFO_DEFAULT |
			FBINFO_HWACCEL_XPAN |
			FBINFO_HWACCEL_YPAN |
			FBINFO_HWACCEL_COPYAREA |
			FBINFO_HWACCEL_IMAGEBLIT |
			FBINFO_HWACCEL_FILLRECT;

	if (noaccel) {
		printk(KERN_DEBUG "disabling acceleration\n");
		info->flags |= FBINFO_HWACCEL_DISABLED;
	}

	/* system-memory pixmap used to stage imageblit data */
	info->pixmap.addr = kmalloc(PM3_PIXMAP_SIZE, GFP_KERNEL);
	if (!info->pixmap.addr) {
		retval = -ENOMEM;
		goto err_exit_pixmap;
	}
	info->pixmap.size = PM3_PIXMAP_SIZE;
	info->pixmap.buf_align = 4;
	info->pixmap.scan_align = 4;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;

	/*
	 * This should give a reasonable default video mode. The following is
	 * done when we can set a video mode.
	 */
	if (!mode_option)
		mode_option = "640x480@60";

	retval = fb_find_mode(&info->var, info, mode_option, NULL, 0, NULL, 8);

	if (!retval || retval == 4) {
		retval = -EINVAL;
		goto err_exit_both;
	}

	if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
		retval = -ENOMEM;
		goto err_exit_both;
	}

	/*
	 * For drivers that can...
	 */
	pm3fb_check_var(&info->var, info);

	if (register_framebuffer(info) < 0) {
		retval = -EINVAL;
		goto err_exit_all;
	}
	printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
		   info->fix.id);
	pci_set_drvdata(dev, info);
	return 0;

	/* unwind in reverse order of acquisition */
 err_exit_all:
	fb_dealloc_cmap(&info->cmap);
 err_exit_both:
	kfree(info->pixmap.addr);
 err_exit_pixmap:
	iounmap(info->screen_base);
	release_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len);
 err_exit_mmio:
	iounmap(par->v_regs);
	release_mem_region(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len);
 err_exit_neither:
	framebuffer_release(info);
	return retval;
}
/*
* Cleanup
*/
/*
 * pm3fb_remove() - PCI remove callback: tear down a registered board.
 * Reverses pm3fb_probe(): unregister the framebuffer, free the cmap,
 * drop the MTRR, unmap and release both memory regions, free fb_info.
 */
static void pm3fb_remove(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
if (info) {
struct fb_fix_screeninfo *fix = &info->fix;
struct pm3_par *par = info->par;
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
#ifdef CONFIG_MTRR
/* mtrr_add() returns a negative handle on failure, so >= 0 is valid */
if (par->mtrr_handle >= 0)
mtrr_del(par->mtrr_handle, info->fix.smem_start,
info->fix.smem_len);
#endif /* CONFIG_MTRR */
iounmap(info->screen_base);
release_mem_region(fix->smem_start, fix->smem_len);
iounmap(par->v_regs);
release_mem_region(fix->mmio_start, fix->mmio_len);
pci_set_drvdata(dev, NULL);
kfree(info->pixmap.addr);
framebuffer_release(info);
}
}
/* PCI IDs handled by this driver: 3DLabs device 0x0a, any subsystem */
static struct pci_device_id pm3fb_id_table[] = {
{ PCI_VENDOR_ID_3DLABS, 0x0a,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0, }
};
/* For PCI drivers */
static struct pci_driver pm3fb_driver = {
.name = "pm3fb",
.id_table = pm3fb_id_table,
.probe = pm3fb_probe,
.remove = pm3fb_remove,
};
MODULE_DEVICE_TABLE(pci, pm3fb_id_table);
#ifndef MODULE
/*
* Setup
*/
/*
* Only necessary if your driver takes special options,
* otherwise we fall back on the generic fb_setup().
*/
/*
 * pm3fb_setup() - parse `video=pm3fb:<opt>,<opt>,...' boot options.
 * Recognized options: "noaccel", "hwcursor=<n>", "nomtrr" (only with
 * CONFIG_MTRR); anything else is taken as the initial video mode.
 * Always returns 0.
 */
static int __init pm3fb_setup(char *options)
{
	char *opt;

	/* nothing to do without an option string */
	if (!options || !*options)
		return 0;

	while ((opt = strsep(&options, ",")) != NULL) {
		if (*opt == '\0')
			continue;
		if (!strncmp(opt, "noaccel", 7)) {
			noaccel = 1;
		} else if (!strncmp(opt, "hwcursor=", 9)) {
			hwcursor = simple_strtoul(opt + 9, NULL, 0);
#ifdef CONFIG_MTRR
		} else if (!strncmp(opt, "nomtrr", 6)) {
			nomtrr = 1;
#endif
		} else {
			/* unrecognized token: treat it as the mode string */
			mode_option = opt;
		}
	}
	return 0;
}
#endif /* MODULE */
/*
 * pm3fb_init() - module / built-in entry point.
 * When built in, fetch and parse any `video=pm3fb:' boot options
 * first, then register the PCI driver.
 */
static int __init pm3fb_init(void)
{
/*
 * For kernel boot options (in 'video=pm3fb:<options>' format)
 */
#ifndef MODULE
char *option = NULL;
if (fb_get_options("pm3fb", &option))
return -ENODEV;
pm3fb_setup(option);
#endif
return pci_register_driver(&pm3fb_driver);
}
#ifdef MODULE
/* pm3fb_exit() - module unload: unregister the PCI driver. */
static void __exit pm3fb_exit(void)
{
pci_unregister_driver(&pm3fb_driver);
}
module_exit(pm3fb_exit);
#endif
module_init(pm3fb_init);

/* Module parameters; descriptions are shown by modinfo. */
module_param(mode_option, charp, 0);
/* fixed typo: example mode read "648x480", driver default is 640x480 */
MODULE_PARM_DESC(mode_option, "Initial video mode e.g. '640x480-8@60'");
module_param(noaccel, bool, 0);
MODULE_PARM_DESC(noaccel, "Disable acceleration");
module_param(hwcursor, int, 0644);
MODULE_PARM_DESC(hwcursor, "Enable hardware cursor "
"(1=enable, 0=disable, default=1)");
#ifdef CONFIG_MTRR
module_param(nomtrr, bool, 0);
MODULE_PARM_DESC(nomtrr, "Disable MTRR support (0 or 1=disabled) (default=0)");
#endif
MODULE_DESCRIPTION("Permedia3 framebuffer device driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
elelinux/android_kernel_htc_pyramid | drivers/isdn/hisax/hfc4s8s_l1.c | 4187 | 43692 | /*************************************************************************/
/* $Id: hfc4s8s_l1.c,v 1.10 2005/02/09 16:31:09 martinb1 Exp $ */
/* HFC-4S/8S low layer interface for Cologne Chip HFC-4S/8S isdn chips */
/* The low layer (L1) is implemented as a loadable module for usage with */
/* the HiSax isdn driver for passive cards. */
/* */
/* Author: Werner Cornelius */
/* (C) 2003 Cornelius Consult (werner@cornelius-consult.de) */
/* */
/* Driver maintained by Cologne Chip */
/* - Martin Bachem, support@colognechip.com */
/* */
/* This driver only works with chip revisions >= 1, older revision 0 */
/* engineering samples (only first manufacturer sample cards) will not */
/* work and are rejected by the driver. */
/* */
/* This file distributed under the GNU GPL. */
/* */
/* See Version History at the end of this file */
/* */
/*************************************************************************/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <asm/io.h>
#include "hisax_if.h"
#include "hfc4s8s_l1.h"
static const char hfc4s8s_rev[] = "Revision: 1.10";
/***************************************************************/
/* adjustable transparent mode fifo threshold */
/* The value defines the used fifo threshold with the equation */
/* */
/* notify number of bytes = 2 * 2 ^ TRANS_FIFO_THRES */
/* */
/* The default value is 5 which results in a buffer size of 64 */
/* and an interrupt rate of 8ms. */
/* The maximum value is 7 due to fifo size restrictions. */
/* Values below 3-4 are not recommended due to high interrupt */
/* load of the processor. For non critical applications the */
/* value should be raised to 7 to reduce any interrupt overhead*/
/***************************************************************/
#define TRANS_FIFO_THRES 5
/*************/
/* constants */
/*************/
#define CLOCKMODE_0 0 /* ext. 24.576 MhZ clk freq, int. single clock mode */
#define CLOCKMODE_1 1 /* ext. 49.576 MhZ clk freq, int. single clock mode */
#define CHIP_ID_SHIFT 4
#define HFC_MAX_ST 8
#define MAX_D_FRAME_SIZE 270
#define MAX_B_FRAME_SIZE 1536
#define TRANS_TIMER_MODE (TRANS_FIFO_THRES & 0xf)
#define TRANS_FIFO_BYTES (2 << TRANS_FIFO_THRES)
#define MAX_F_CNT 0x0f
#define CLKDEL_NT 0x6c
#define CLKDEL_TE 0xf
#define CTRL0_NT 4
#define CTRL0_TE 0
#define L1_TIMER_T4 2 /* minimum in jiffies */
#define L1_TIMER_T3 (7 * HZ) /* activation timeout */
#define L1_TIMER_T1 ((120 * HZ) / 1000) /* NT mode deactivation timeout */
/******************/
/* types and vars */
/******************/
static int card_cnt;
/* private driver_data */
/* static per-PCI-id parameters, referenced from pci_device_id.driver_data */
typedef struct {
int chip_id;
int clock_mode;
int max_st_ports;
char *device_name;
} hfc4s8s_param;
/* supported boards: each entry carries its hfc4s8s_param as driver_data */
static struct pci_device_id hfc4s8s_ids[] = {
{.vendor = PCI_VENDOR_ID_CCD,
.device = PCI_DEVICE_ID_4S,
.subvendor = 0x1397,
.subdevice = 0x08b4,
.driver_data =
(unsigned long) &((hfc4s8s_param) {CHIP_ID_4S, CLOCKMODE_0, 4,
"HFC-4S Evaluation Board"}),
},
{.vendor = PCI_VENDOR_ID_CCD,
.device = PCI_DEVICE_ID_8S,
.subvendor = 0x1397,
.subdevice = 0x16b8,
.driver_data =
(unsigned long) &((hfc4s8s_param) {CHIP_ID_8S, CLOCKMODE_0, 8,
"HFC-8S Evaluation Board"}),
},
{.vendor = PCI_VENDOR_ID_CCD,
.device = PCI_DEVICE_ID_4S,
.subvendor = 0x1397,
.subdevice = 0xb520,
.driver_data =
(unsigned long) &((hfc4s8s_param) {CHIP_ID_4S, CLOCKMODE_1, 4,
"IOB4ST"}),
},
{.vendor = PCI_VENDOR_ID_CCD,
.device = PCI_DEVICE_ID_8S,
.subvendor = 0x1397,
.subdevice = 0xb522,
.driver_data =
(unsigned long) &((hfc4s8s_param) {CHIP_ID_8S, CLOCKMODE_1, 8,
"IOB8ST"}),
},
{}
};
MODULE_DEVICE_TABLE(pci, hfc4s8s_ids);
MODULE_AUTHOR("Werner Cornelius, werner@cornelius-consult.de");
MODULE_DESCRIPTION("ISDN layer 1 for Cologne Chip HFC-4S/8S chips");
MODULE_LICENSE("GPL");
/***********/
/* layer 1 */
/***********/
/* per B-channel state: HiSax interface plus tx/rx bookkeeping */
struct hfc4s8s_btype {
spinlock_t lock;
struct hisax_b_if b_if;
struct hfc4s8s_l1 *l1p; /* owning S/T port */
struct sk_buff_head tx_queue;
struct sk_buff *tx_skb; /* frame currently being transmitted */
struct sk_buff *rx_skb; /* frame currently being assembled */
__u8 *rx_ptr; /* write position inside rx_skb */
int tx_cnt; /* bytes of tx_skb already written to fifo */
int bchan; /* B channel number (1 or 2) */
int mode; /* L1_MODE_NULL / HDLC / TRANS */
};
struct _hfc4s8s_hw;
/* per S/T port (ISDN layer 1) state */
struct hfc4s8s_l1 {
spinlock_t lock;
struct _hfc4s8s_hw *hw; /* pointer to hardware area */
int l1_state; /* actual l1 state */
struct timer_list l1_timer; /* layer 1 timer structure */
int nt_mode; /* set to nt mode */
int st_num; /* own index */
int enabled; /* interface is enabled */
struct sk_buff_head d_tx_queue; /* send queue */
int tx_cnt; /* bytes to send */
struct hisax_d_if d_if; /* D-channel interface */
struct hfc4s8s_btype b_ch[2]; /* B-channel data */
struct hisax_b_if *b_table[2];
};
/**********************/
/* hardware structure */
/**********************/
/* per-card state: PCI resources, all S/T ports and mirrored registers */
typedef struct _hfc4s8s_hw {
spinlock_t lock;
int cardnum;
int ifnum;
int iobase; /* io-mapped access base */
int nt_mode;
u_char *membase; /* memory-mapped access base (PCIMEM build) */
u_char *hw_membase;
void *pdev;
int max_fifo;
hfc4s8s_param driver_data;
int irq;
int fifo_sched_cnt;
struct work_struct tqueue; /* bottom half (hfc4s8s_bh) */
struct hfc4s8s_l1 l1[HFC_MAX_ST];
char card_name[60];
/* software mirror of interrupt-related chip registers, accumulated
   by the irq handler and consumed by the bottom half */
struct {
u_char r_irq_ctrl;
u_char r_ctrl0;
volatile u_char r_irq_statech; /* active isdn l1 status */
u_char r_irqmsk_statchg; /* enabled isdn status ints */
u_char r_irq_fifo_blx[8]; /* fifo status registers */
u_char fifo_rx_trans_enables[8]; /* mask for enabled transparent rx fifos */
u_char fifo_slow_timer_service[8]; /* mask for fifos needing slower timer service */
volatile u_char r_irq_oview; /* contents of overview register */
volatile u_char timer_irq;
int timer_usg_cnt; /* number of channels using timer */
} mr;
} hfc4s8s_hw;
/***************************/
/* inline function defines */
/***************************/
#ifdef HISAX_HFC4S8S_PCIMEM /* inline functions memory mapped */
/* memory write and dummy IO read to avoid PCI byte merge problems */
#define Write_hfc8(a,b,c) {(*((volatile u_char *)(a->membase+b)) = c); inb(a->iobase+4);}
/* memory write without dummy IO access for fifo data access */
#define fWrite_hfc8(a,b,c) (*((volatile u_char *)(a->membase+b)) = c)
#define Read_hfc8(a,b) (*((volatile u_char *)(a->membase+b)))
#define Write_hfc16(a,b,c) (*((volatile unsigned short *)(a->membase+b)) = c)
#define Read_hfc16(a,b) (*((volatile unsigned short *)(a->membase+b)))
#define Write_hfc32(a,b,c) (*((volatile unsigned long *)(a->membase+b)) = c)
#define Read_hfc32(a,b) (*((volatile unsigned long *)(a->membase+b)))
#define wait_busy(a) {while ((Read_hfc8(a, R_STATUS) & M_BUSY));}
#define PCI_ENA_MEMIO 0x03
#else
/* inline functions io mapped */
/* latch the target chip register address into the address port (iobase+4) */
static inline void
SetRegAddr(hfc4s8s_hw * a, u_char b)
{
outb(b, (a->iobase) + 4);
}
/* read back the currently selected register address from iobase+4 */
static inline u_char
GetRegAddr(hfc4s8s_hw * a)
{
return (inb((volatile u_int) (a->iobase + 4)));
}
/* select register b, then write the 8 bit value c through the data port */
static inline void
Write_hfc8(hfc4s8s_hw * a, u_char b, u_char c)
{
SetRegAddr(a, b);
outb(c, a->iobase);
}
/* fast fifo write: 8 bit data to the register selected earlier */
static inline void
fWrite_hfc8(hfc4s8s_hw * a, u_char c)
{
outb(c, a->iobase);
}
/* select register b, then write the 16 bit value c through the data port */
static inline void
Write_hfc16(hfc4s8s_hw * a, u_char b, u_short c)
{
SetRegAddr(a, b);
outw(c, a->iobase);
}
/* select register b, then write the 32 bit value c through the data port */
static inline void
Write_hfc32(hfc4s8s_hw * a, u_char b, u_long c)
{
SetRegAddr(a, b);
outl(c, a->iobase);
}
/* fast fifo write: 32 bit data to the register selected earlier */
static inline void
fWrite_hfc32(hfc4s8s_hw * a, u_long c)
{
outl(c, a->iobase);
}
/* select register b, then read an 8 bit value through the data port */
static inline u_char
Read_hfc8(hfc4s8s_hw * a, u_char b)
{
SetRegAddr(a, b);
return (inb((volatile u_int) a->iobase));
}
/* fast fifo read: 8 bit data from the register selected earlier */
static inline u_char
fRead_hfc8(hfc4s8s_hw * a)
{
return (inb((volatile u_int) a->iobase));
}
/* select register b, then read a 16 bit value through the data port */
static inline u_short
Read_hfc16(hfc4s8s_hw * a, u_char b)
{
SetRegAddr(a, b);
return (inw((volatile u_int) a->iobase));
}
/* select register b, then read a 32 bit value through the data port */
static inline u_long
Read_hfc32(hfc4s8s_hw * a, u_char b)
{
SetRegAddr(a, b);
return (inl((volatile u_int) a->iobase));
}
/* fast fifo read: 32 bit data from the register selected earlier */
static inline u_long
fRead_hfc32(hfc4s8s_hw * a)
{
return (inl((volatile u_int) a->iobase));
}
/* spin until the chip clears the BUSY flag in R_STATUS */
static inline void
wait_busy(hfc4s8s_hw * a)
{
SetRegAddr(a, R_STATUS);
while (inb((volatile u_int) a->iobase) & M_BUSY);
}
#define PCI_ENA_REGIO 0x01
#endif /* HISAX_HFC4S8S_PCIMEM */
/******************************************************/
/* function to read critical counter registers that */
/* may be updated by the chip during read */
/******************************************************/
/*
 * Read_hfc8_stable() - stable read of an 8 bit counter register.
 * The chip may update counter registers concurrently with the read,
 * so re-read until two consecutive reads return the same value.
 */
static u_char
Read_hfc8_stable(hfc4s8s_hw * hw, int reg)
{
	u_char prev, cur;

	cur = Read_hfc8(hw, reg);
	do {
		prev = cur;
		cur = Read_hfc8(hw, reg);
	} while (cur != prev);
	return cur;
}
/*
 * Read_hfc16_stable() - stable read of a 16 bit counter register.
 * Same scheme as Read_hfc8_stable(): loop until two consecutive
 * reads agree, then return that value.
 */
static int
Read_hfc16_stable(hfc4s8s_hw * hw, int reg)
{
	int prev, cur;

	cur = Read_hfc16(hw, reg);
	do {
		prev = cur;
		cur = Read_hfc16(hw, reg);
	} while (cur != prev);
	return cur;
}
/*****************************/
/* D-channel call from HiSax */
/*****************************/
/*
 * dch_l2l1() - D-channel request entry point called by the HiSax L2.
 * pr selects the primitive: PH_DATA|REQUEST queues a frame and kicks
 * the bottom half if the tx path is idle; PH_ACTIVATE|REQUEST starts
 * layer 1 activation (TE or NT mode).
 */
static void
dch_l2l1(struct hisax_d_if *iface, int pr, void *arg)
{
struct hfc4s8s_l1 *l1 = iface->ifc.priv;
struct sk_buff *skb = (struct sk_buff *) arg;
u_long flags;
switch (pr) {
case (PH_DATA | REQUEST):
if (!l1->enabled) {
dev_kfree_skb(skb);
break;
}
spin_lock_irqsave(&l1->lock, flags);
skb_queue_tail(&l1->d_tx_queue, skb);
/* schedule the bottom half only when the tx fifo is idle */
if ((skb_queue_len(&l1->d_tx_queue) == 1) &&
(l1->tx_cnt <= 0)) {
l1->hw->mr.r_irq_fifo_blx[l1->st_num] |=
0x10;
spin_unlock_irqrestore(&l1->lock, flags);
schedule_work(&l1->hw->tqueue);
} else
spin_unlock_irqrestore(&l1->lock, flags);
break;
case (PH_ACTIVATE | REQUEST):
if (!l1->enabled)
break;
if (!l1->nt_mode) {
/* TE mode: request activation unless already past state 6;
   timer T3 supervises the attempt */
if (l1->l1_state < 6) {
spin_lock_irqsave(&l1->lock,
flags);
Write_hfc8(l1->hw, R_ST_SEL,
l1->st_num);
Write_hfc8(l1->hw, A_ST_WR_STA,
0x60);
mod_timer(&l1->l1_timer,
jiffies + L1_TIMER_T3);
spin_unlock_irqrestore(&l1->lock,
flags);
} else if (l1->l1_state == 7)
/* already active: confirm immediately */
l1->d_if.ifc.l1l2(&l1->d_if.ifc,
PH_ACTIVATE |
INDICATION,
NULL);
} else {
/* NT mode: state 3 (G3) means the line is active */
if (l1->l1_state != 3) {
spin_lock_irqsave(&l1->lock,
flags);
Write_hfc8(l1->hw, R_ST_SEL,
l1->st_num);
Write_hfc8(l1->hw, A_ST_WR_STA,
0x60);
spin_unlock_irqrestore(&l1->lock,
flags);
} else if (l1->l1_state == 3)
l1->d_if.ifc.l1l2(&l1->d_if.ifc,
PH_ACTIVATE |
INDICATION,
NULL);
}
break;
default:
printk(KERN_INFO
"HFC-4S/8S: Unknown D-chan cmd 0x%x received, ignored\n",
pr);
break;
}
/* a disabled port always reports itself deactivated to L2 */
if (!l1->enabled)
l1->d_if.ifc.l1l2(&l1->d_if.ifc,
PH_DEACTIVATE | INDICATION, NULL);
} /* dch_l2l1 */
/*****************************/
/* B-channel call from HiSax */
/*****************************/
/*
 * bch_l2l1() - B-channel request entry point called by the HiSax L2.
 * PH_DATA|REQUEST queues a frame for transmission; PH_ACTIVATE and
 * PH_DEACTIVATE (de)configure the channel's tx/rx fifo pair for HDLC
 * or transparent mode and (dis)connect it from the S/T interface.
 */
static void
bch_l2l1(struct hisax_if *ifc, int pr, void *arg)
{
struct hfc4s8s_btype *bch = ifc->priv;
struct hfc4s8s_l1 *l1 = bch->l1p;
struct sk_buff *skb = (struct sk_buff *) arg;
long mode = (long) arg; /* for PH_ACTIVATE, arg carries the L1 mode */
u_long flags;
switch (pr) {
case (PH_DATA | REQUEST):
if (!l1->enabled || (bch->mode == L1_MODE_NULL)) {
dev_kfree_skb(skb);
break;
}
spin_lock_irqsave(&l1->lock, flags);
skb_queue_tail(&bch->tx_queue, skb);
/* schedule the bottom half only when the tx path is idle */
if (!bch->tx_skb && (bch->tx_cnt <= 0)) {
l1->hw->mr.r_irq_fifo_blx[l1->st_num] |=
((bch->bchan == 1) ? 1 : 4);
spin_unlock_irqrestore(&l1->lock, flags);
schedule_work(&l1->hw->tqueue);
} else
spin_unlock_irqrestore(&l1->lock, flags);
break;
case (PH_ACTIVATE | REQUEST):
case (PH_DEACTIVATE | REQUEST):
if (!l1->enabled)
break;
if (pr == (PH_DEACTIVATE | REQUEST))
mode = L1_MODE_NULL;
switch (mode) {
case L1_MODE_HDLC:
/* set up tx and rx fifo for HDLC framing and connect
   the channel to the S/T interface */
spin_lock_irqsave(&l1->lock,
flags);
l1->hw->mr.timer_usg_cnt++;
l1->hw->mr.
fifo_slow_timer_service[l1->
st_num]
|=
((bch->bchan ==
1) ? 0x2 : 0x8);
Write_hfc8(l1->hw, R_FIFO,
(l1->st_num * 8 +
((bch->bchan ==
1) ? 0 : 2)));
wait_busy(l1->hw);
Write_hfc8(l1->hw, A_CON_HDLC, 0xc); /* HDLC mode, flag fill, connect ST */
Write_hfc8(l1->hw, A_SUBCH_CFG, 0); /* 8 bits */
Write_hfc8(l1->hw, A_IRQ_MSK, 1); /* enable TX interrupts for hdlc */
Write_hfc8(l1->hw, A_INC_RES_FIFO, 2); /* reset fifo */
wait_busy(l1->hw);
Write_hfc8(l1->hw, R_FIFO,
(l1->st_num * 8 +
((bch->bchan ==
1) ? 1 : 3)));
wait_busy(l1->hw);
Write_hfc8(l1->hw, A_CON_HDLC, 0xc); /* HDLC mode, flag fill, connect ST */
Write_hfc8(l1->hw, A_SUBCH_CFG, 0); /* 8 bits */
Write_hfc8(l1->hw, A_IRQ_MSK, 1); /* enable RX interrupts for hdlc */
Write_hfc8(l1->hw, A_INC_RES_FIFO, 2); /* reset fifo */
Write_hfc8(l1->hw, R_ST_SEL,
l1->st_num);
l1->hw->mr.r_ctrl0 |=
(bch->bchan & 3);
Write_hfc8(l1->hw, A_ST_CTRL0,
l1->hw->mr.r_ctrl0);
bch->mode = L1_MODE_HDLC;
spin_unlock_irqrestore(&l1->lock,
flags);
bch->b_if.ifc.l1l2(&bch->b_if.ifc,
PH_ACTIVATE |
INDICATION,
NULL);
break;
case L1_MODE_TRANS:
/* set up both fifos for transparent (audio) mode;
   rx is polled from the timer interrupt */
spin_lock_irqsave(&l1->lock,
flags);
l1->hw->mr.
fifo_rx_trans_enables[l1->
st_num]
|=
((bch->bchan ==
1) ? 0x2 : 0x8);
l1->hw->mr.timer_usg_cnt++;
Write_hfc8(l1->hw, R_FIFO,
(l1->st_num * 8 +
((bch->bchan ==
1) ? 0 : 2)));
wait_busy(l1->hw);
Write_hfc8(l1->hw, A_CON_HDLC, 0xf); /* Transparent mode, 1 fill, connect ST */
Write_hfc8(l1->hw, A_SUBCH_CFG, 0); /* 8 bits */
Write_hfc8(l1->hw, A_IRQ_MSK, 0); /* disable TX interrupts */
Write_hfc8(l1->hw, A_INC_RES_FIFO, 2); /* reset fifo */
wait_busy(l1->hw);
Write_hfc8(l1->hw, R_FIFO,
(l1->st_num * 8 +
((bch->bchan ==
1) ? 1 : 3)));
wait_busy(l1->hw);
Write_hfc8(l1->hw, A_CON_HDLC, 0xf); /* Transparent mode, 1 fill, connect ST */
Write_hfc8(l1->hw, A_SUBCH_CFG, 0); /* 8 bits */
Write_hfc8(l1->hw, A_IRQ_MSK, 0); /* disable RX interrupts */
Write_hfc8(l1->hw, A_INC_RES_FIFO, 2); /* reset fifo */
Write_hfc8(l1->hw, R_ST_SEL,
l1->st_num);
l1->hw->mr.r_ctrl0 |=
(bch->bchan & 3);
Write_hfc8(l1->hw, A_ST_CTRL0,
l1->hw->mr.r_ctrl0);
bch->mode = L1_MODE_TRANS;
spin_unlock_irqrestore(&l1->lock,
flags);
bch->b_if.ifc.l1l2(&bch->b_if.ifc,
PH_ACTIVATE |
INDICATION,
NULL);
break;
default:
/* deactivate: disconnect fifos, drop pending buffers */
if (bch->mode == L1_MODE_NULL)
break;
spin_lock_irqsave(&l1->lock,
flags);
l1->hw->mr.
fifo_slow_timer_service[l1->
st_num]
&=
~((bch->bchan ==
1) ? 0x3 : 0xc);
l1->hw->mr.
fifo_rx_trans_enables[l1->
st_num]
&=
~((bch->bchan ==
1) ? 0x3 : 0xc);
l1->hw->mr.timer_usg_cnt--;
Write_hfc8(l1->hw, R_FIFO,
(l1->st_num * 8 +
((bch->bchan ==
1) ? 0 : 2)));
wait_busy(l1->hw);
Write_hfc8(l1->hw, A_IRQ_MSK, 0); /* disable TX interrupts */
wait_busy(l1->hw);
Write_hfc8(l1->hw, R_FIFO,
(l1->st_num * 8 +
((bch->bchan ==
1) ? 1 : 3)));
wait_busy(l1->hw);
Write_hfc8(l1->hw, A_IRQ_MSK, 0); /* disable RX interrupts */
Write_hfc8(l1->hw, R_ST_SEL,
l1->st_num);
l1->hw->mr.r_ctrl0 &=
~(bch->bchan & 3);
Write_hfc8(l1->hw, A_ST_CTRL0,
l1->hw->mr.r_ctrl0);
spin_unlock_irqrestore(&l1->lock,
flags);
bch->mode = L1_MODE_NULL;
bch->b_if.ifc.l1l2(&bch->b_if.ifc,
PH_DEACTIVATE |
INDICATION,
NULL);
if (bch->tx_skb) {
dev_kfree_skb(bch->tx_skb);
bch->tx_skb = NULL;
}
if (bch->rx_skb) {
dev_kfree_skb(bch->rx_skb);
bch->rx_skb = NULL;
}
skb_queue_purge(&bch->tx_queue);
bch->tx_cnt = 0;
bch->rx_ptr = NULL;
break;
}
/* timer is only used when at least one b channel */
/* is set up to transparent mode */
if (l1->hw->mr.timer_usg_cnt) {
Write_hfc8(l1->hw, R_IRQMSK_MISC,
M_TI_IRQMSK);
} else {
Write_hfc8(l1->hw, R_IRQMSK_MISC, 0);
}
break;
default:
printk(KERN_INFO
"HFC-4S/8S: Unknown B-chan cmd 0x%x received, ignored\n",
pr);
break;
}
/* a disabled port always reports itself deactivated to L2 */
if (!l1->enabled)
bch->b_if.ifc.l1l2(&bch->b_if.ifc,
PH_DEACTIVATE | INDICATION, NULL);
} /* bch_l2l1 */
/**************************/
/* layer 1 timer function */
/**************************/
/*
 * hfc_l1_timer() - layer 1 supervision timeout.
 * NT mode: deactivation timer expired; force the state machine back to
 * state 1 and report deactivation to L2.  TE mode: activation timed
 * out; force state 3 and report deactivation.
 */
static void
hfc_l1_timer(struct hfc4s8s_l1 *l1)
{
u_long flags;
if (!l1->enabled)
return;
spin_lock_irqsave(&l1->lock, flags);
if (l1->nt_mode) {
l1->l1_state = 1;
Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
Write_hfc8(l1->hw, A_ST_WR_STA, 0x11);
/* drop the lock around the L2 callback to avoid recursion
   into this driver while holding it */
spin_unlock_irqrestore(&l1->lock, flags);
l1->d_if.ifc.l1l2(&l1->d_if.ifc,
PH_DEACTIVATE | INDICATION, NULL);
spin_lock_irqsave(&l1->lock, flags);
l1->l1_state = 1;
Write_hfc8(l1->hw, A_ST_WR_STA, 0x1);
spin_unlock_irqrestore(&l1->lock, flags);
} else {
/* activation timed out */
Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
Write_hfc8(l1->hw, A_ST_WR_STA, 0x13);
spin_unlock_irqrestore(&l1->lock, flags);
l1->d_if.ifc.l1l2(&l1->d_if.ifc,
PH_DEACTIVATE | INDICATION, NULL);
spin_lock_irqsave(&l1->lock, flags);
Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
Write_hfc8(l1->hw, A_ST_WR_STA, 0x3);
spin_unlock_irqrestore(&l1->lock, flags);
}
} /* hfc_l1_timer */
/****************************************/
/* a complete D-frame has been received */
/****************************************/
/*
 * rx_d_frame() - drain complete D- or E-channel frames from a fifo.
 * ech selects the E channel fifo (7) instead of the D channel rx
 * fifo (5).  Loops until no complete frame is left; frames with a
 * bad size or a non-zero STAT byte (CRC error) are discarded.
 */
static void
rx_d_frame(struct hfc4s8s_l1 *l1p, int ech)
{
int z1, z2;
u_char f1, f2, df;
struct sk_buff *skb;
u_char *cp;
if (!l1p->enabled)
return;
do {
/* E/D RX fifo */
Write_hfc8(l1p->hw, R_FIFO,
(l1p->st_num * 8 + ((ech) ? 7 : 5)));
wait_busy(l1p->hw);
/* frame count: difference of the F counters (modulo MAX_F_CNT+1) */
f1 = Read_hfc8_stable(l1p->hw, A_F1);
f2 = Read_hfc8(l1p->hw, A_F2);
df = f1 - f2;
if ((f1 - f2) < 0)
df = f1 - f2 + MAX_F_CNT + 1;
if (!df) {
return; /* no complete frame in fifo */
}
/* byte count of the first complete frame (Z ring is 384 bytes) */
z1 = Read_hfc16_stable(l1p->hw, A_Z1);
z2 = Read_hfc16(l1p->hw, A_Z2);
z1 = z1 - z2 + 1;
if (z1 < 0)
z1 += 384;
if (!(skb = dev_alloc_skb(MAX_D_FRAME_SIZE))) {
printk(KERN_INFO
"HFC-4S/8S: Could not allocate D/E "
"channel receive buffer");
Write_hfc8(l1p->hw, A_INC_RES_FIFO, 2);
wait_busy(l1p->hw);
return;
}
if (((z1 < 4) || (z1 > MAX_D_FRAME_SIZE))) {
if (skb)
dev_kfree_skb(skb);
/* remove errornous D frame */
if (df == 1) {
/* reset fifo */
Write_hfc8(l1p->hw, A_INC_RES_FIFO, 2);
wait_busy(l1p->hw);
return;
} else {
/* read errornous D frame */
#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1p->hw, A_FIFO_DATA0);
#endif
while (z1 >= 4) {
#ifdef HISAX_HFC4S8S_PCIMEM
Read_hfc32(l1p->hw, A_FIFO_DATA0);
#else
fRead_hfc32(l1p->hw);
#endif
z1 -= 4;
}
while (z1--)
#ifdef HISAX_HFC4S8S_PCIMEM
Read_hfc8(l1p->hw, A_FIFO_DATA0);
#else
fRead_hfc8(l1p->hw);
#endif
Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1);
wait_busy(l1p->hw);
return;
}
}
/* copy the frame out of the fifo, 32 bits at a time */
cp = skb->data;
#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1p->hw, A_FIFO_DATA0);
#endif
while (z1 >= 4) {
#ifdef HISAX_HFC4S8S_PCIMEM
*((unsigned long *) cp) =
Read_hfc32(l1p->hw, A_FIFO_DATA0);
#else
*((unsigned long *) cp) = fRead_hfc32(l1p->hw);
#endif
cp += 4;
z1 -= 4;
}
while (z1--)
#ifdef HISAX_HFC4S8S_PCIMEM
*cp++ = Read_hfc8(l1p->hw, A_FIFO_DATA0);
#else
*cp++ = fRead_hfc8(l1p->hw);
#endif
Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1); /* increment f counter */
wait_busy(l1p->hw);
/* last byte is the STAT byte: non-zero means CRC error */
if (*(--cp)) {
dev_kfree_skb(skb);
} else {
/* strip CRC (2 bytes); STAT byte already excluded by cp */
skb->len = (cp - skb->data) - 2;
if (ech)
l1p->d_if.ifc.l1l2(&l1p->d_if.ifc,
PH_DATA_E | INDICATION,
skb);
else
l1p->d_if.ifc.l1l2(&l1p->d_if.ifc,
PH_DATA | INDICATION,
skb);
}
} while (1);
} /* rx_d_frame */
/*************************************************************/
/* a B-frame has been received (perhaps not fully completed) */
/*************************************************************/
/*
 * rx_b_frame() - read available B-channel data from the rx fifo.
 * In HDLC mode, frames are assembled across calls in bch->rx_skb and
 * delivered when the closing flag has arrived and the CRC is good.
 * In transparent mode the currently buffered bytes are delivered
 * immediately.
 */
static void
rx_b_frame(struct hfc4s8s_btype *bch)
{
int z1, z2, hdlc_complete;
u_char f1, f2;
struct hfc4s8s_l1 *l1 = bch->l1p;
struct sk_buff *skb;
if (!l1->enabled || (bch->mode == L1_MODE_NULL))
return;
do {
/* RX Fifo */
Write_hfc8(l1->hw, R_FIFO,
(l1->st_num * 8 + ((bch->bchan == 1) ? 1 : 3)));
wait_busy(l1->hw);
/* differing F counters mean a complete HDLC frame is in the fifo */
if (bch->mode == L1_MODE_HDLC) {
f1 = Read_hfc8_stable(l1->hw, A_F1);
f2 = Read_hfc8(l1->hw, A_F2);
hdlc_complete = ((f1 ^ f2) & MAX_F_CNT);
} else
hdlc_complete = 0;
z1 = Read_hfc16_stable(l1->hw, A_Z1);
z2 = Read_hfc16(l1->hw, A_Z2);
z1 = (z1 - z2);
if (hdlc_complete)
z1++;
if (z1 < 0)
z1 += 384;
if (!z1)
break;
if (!(skb = bch->rx_skb)) {
if (!
(skb =
dev_alloc_skb((bch->mode ==
L1_MODE_TRANS) ? z1
: (MAX_B_FRAME_SIZE + 3)))) {
printk(KERN_ERR
"HFC-4S/8S: Could not allocate B "
"channel receive buffer");
return;
}
bch->rx_ptr = skb->data;
bch->rx_skb = skb;
}
skb->len = (bch->rx_ptr - skb->data) + z1;
/* HDLC length check */
if ((bch->mode == L1_MODE_HDLC) &&
((hdlc_complete && (skb->len < 4)) ||
(skb->len > (MAX_B_FRAME_SIZE + 3)))) {
/* oversized or runt frame: drop data and reset the fifo */
skb->len = 0;
bch->rx_ptr = skb->data;
Write_hfc8(l1->hw, A_INC_RES_FIFO, 2); /* reset fifo */
wait_busy(l1->hw);
return;
}
/* copy fifo contents, 32 bits at a time */
#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1->hw, A_FIFO_DATA0);
#endif
while (z1 >= 4) {
#ifdef HISAX_HFC4S8S_PCIMEM
*((unsigned long *) bch->rx_ptr) =
Read_hfc32(l1->hw, A_FIFO_DATA0);
#else
*((unsigned long *) bch->rx_ptr) =
fRead_hfc32(l1->hw);
#endif
bch->rx_ptr += 4;
z1 -= 4;
}
while (z1--)
#ifdef HISAX_HFC4S8S_PCIMEM
*(bch->rx_ptr++) = Read_hfc8(l1->hw, A_FIFO_DATA0);
#else
*(bch->rx_ptr++) = fRead_hfc8(l1->hw);
#endif
if (hdlc_complete) {
/* increment f counter */
Write_hfc8(l1->hw, A_INC_RES_FIFO, 1);
wait_busy(l1->hw);
/* hdlc crc check */
bch->rx_ptr--;
if (*bch->rx_ptr) {
/* bad STAT byte: restart frame assembly */
skb->len = 0;
bch->rx_ptr = skb->data;
continue;
}
skb->len -= 3;
}
if (hdlc_complete || (bch->mode == L1_MODE_TRANS)) {
bch->rx_skb = NULL;
bch->rx_ptr = NULL;
bch->b_if.ifc.l1l2(&bch->b_if.ifc,
PH_DATA | INDICATION, skb);
}
} while (1);
} /* rx_b_frame */
/********************************************/
/* a D-frame has been/should be transmitted */
/********************************************/
/*
 * tx_d_frame() - push the next queued D-channel frame into the tx fifo.
 * Only runs in TE state 7 (active).  Confirms the previously sent
 * frame to L2 before writing the next one.
 */
static void
tx_d_frame(struct hfc4s8s_l1 *l1p)
{
struct sk_buff *skb;
u_char f1, f2;
u_char *cp;
long cnt;
if (l1p->l1_state != 7)
return;
/* TX fifo */
Write_hfc8(l1p->hw, R_FIFO, (l1p->st_num * 8 + 4));
wait_busy(l1p->hw);
f1 = Read_hfc8(l1p->hw, A_F1);
f2 = Read_hfc8_stable(l1p->hw, A_F2);
if ((f1 ^ f2) & MAX_F_CNT)
return; /* fifo is still filled */
/* previous frame fully sent: confirm its size to L2 */
if (l1p->tx_cnt > 0) {
cnt = l1p->tx_cnt;
l1p->tx_cnt = 0;
l1p->d_if.ifc.l1l2(&l1p->d_if.ifc, PH_DATA | CONFIRM,
(void *) cnt);
}
if ((skb = skb_dequeue(&l1p->d_tx_queue))) {
cp = skb->data;
cnt = skb->len;
#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1p->hw, A_FIFO_DATA0);
#endif
while (cnt >= 4) {
#ifdef HISAX_HFC4S8S_PCIMEM
fWrite_hfc32(l1p->hw, A_FIFO_DATA0,
*(unsigned long *) cp);
#else
SetRegAddr(l1p->hw, A_FIFO_DATA0);
fWrite_hfc32(l1p->hw, *(unsigned long *) cp);
#endif
cp += 4;
cnt -= 4;
}
#ifdef HISAX_HFC4S8S_PCIMEM
while (cnt--)
fWrite_hfc8(l1p->hw, A_FIFO_DATA0, *cp++);
#else
while (cnt--)
fWrite_hfc8(l1p->hw, *cp++);
#endif
/* truesize is what gets confirmed back to L2 next round */
l1p->tx_cnt = skb->truesize;
Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1); /* increment f counter */
wait_busy(l1p->hw);
dev_kfree_skb(skb);
}
} /* tx_d_frame */
/******************************************************/
/* a B-frame may be transmitted (or is not completed) */
/******************************************************/
/*
 * tx_b_frame() - fill the B-channel tx fifo from the send queue.
 * Writes as much of the current skb as fits (keeping at least 16
 * bytes headroom), closes HDLC frames by incrementing the F counter,
 * and confirms completed skbs to L2 via PH_DATA | CONFIRM.
 */
static void
tx_b_frame(struct hfc4s8s_btype *bch)
{
struct sk_buff *skb;
struct hfc4s8s_l1 *l1 = bch->l1p;
u_char *cp;
int cnt, max, hdlc_num;
long ack_len = 0;
if (!l1->enabled || (bch->mode == L1_MODE_NULL))
return;
/* TX fifo */
Write_hfc8(l1->hw, R_FIFO,
(l1->st_num * 8 + ((bch->bchan == 1) ? 0 : 2)));
wait_busy(l1->hw);
do {
if (bch->mode == L1_MODE_HDLC) {
/* number of HDLC frames still queued in the fifo */
hdlc_num = Read_hfc8(l1->hw, A_F1) & MAX_F_CNT;
hdlc_num -=
(Read_hfc8_stable(l1->hw, A_F2) & MAX_F_CNT);
if (hdlc_num < 0)
hdlc_num += 16;
if (hdlc_num >= 15)
break; /* fifo still filled up with hdlc frames */
} else
hdlc_num = 0;
if (!(skb = bch->tx_skb)) {
if (!(skb = skb_dequeue(&bch->tx_queue))) {
l1->hw->mr.fifo_slow_timer_service[l1->
st_num]
&= ~((bch->bchan == 1) ? 1 : 4);
break; /* list empty */
}
bch->tx_skb = skb;
bch->tx_cnt = 0;
}
/* an empty fifo needs the slow timer to re-trigger sending */
if (!hdlc_num)
l1->hw->mr.fifo_slow_timer_service[l1->st_num] |=
((bch->bchan == 1) ? 1 : 4);
else
l1->hw->mr.fifo_slow_timer_service[l1->st_num] &=
~((bch->bchan == 1) ? 1 : 4);
/* free fifo space (Z ring is 384 bytes, keep one byte gap) */
max = Read_hfc16_stable(l1->hw, A_Z2);
max -= Read_hfc16(l1->hw, A_Z1);
if (max <= 0)
max += 384;
max--;
if (max < 16)
break; /* don't write to small amounts of bytes */
cnt = skb->len - bch->tx_cnt;
if (cnt > max)
cnt = max;
cp = skb->data + bch->tx_cnt;
bch->tx_cnt += cnt;
#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(l1->hw, A_FIFO_DATA0);
#endif
while (cnt >= 4) {
#ifdef HISAX_HFC4S8S_PCIMEM
fWrite_hfc32(l1->hw, A_FIFO_DATA0,
*(unsigned long *) cp);
#else
fWrite_hfc32(l1->hw, *(unsigned long *) cp);
#endif
cp += 4;
cnt -= 4;
}
while (cnt--)
#ifdef HISAX_HFC4S8S_PCIMEM
fWrite_hfc8(l1->hw, A_FIFO_DATA0, *cp++);
#else
fWrite_hfc8(l1->hw, *cp++);
#endif
if (bch->tx_cnt >= skb->len) {
if (bch->mode == L1_MODE_HDLC) {
/* increment f counter */
Write_hfc8(l1->hw, A_INC_RES_FIFO, 1);
}
ack_len += skb->truesize;
bch->tx_skb = NULL;
bch->tx_cnt = 0;
dev_kfree_skb(skb);
} else
/* Re-Select */
Write_hfc8(l1->hw, R_FIFO,
(l1->st_num * 8 +
((bch->bchan == 1) ? 0 : 2)));
wait_busy(l1->hw);
} while (1);
if (ack_len)
bch->b_if.ifc.l1l2((struct hisax_if *) &bch->b_if,
PH_DATA | CONFIRM, (void *) ack_len);
} /* tx_b_frame */
/*************************************/
/* bottom half handler for interrupt */
/*************************************/
/*
 * hfc4s8s_bh() - workqueue bottom half.
 * Processes the state-change and fifo event bits accumulated by the
 * interrupt handler: first walks all ports running the layer 1 state
 * machines (NT and TE), then services the per-port fifo event masks
 * (D/E channel rx, D tx, B1/B2 rx and tx).
 */
static void
hfc4s8s_bh(struct work_struct *work)
{
hfc4s8s_hw *hw = container_of(work, hfc4s8s_hw, tqueue);
u_char b;
struct hfc4s8s_l1 *l1p;
volatile u_char *fifo_stat;
int idx;
/* handle layer 1 state changes */
b = 1;
l1p = hw->l1;
while (b) {
if ((b & hw->mr.r_irq_statech)) {
/* reset l1 event */
hw->mr.r_irq_statech &= ~b;
if (l1p->enabled) {
if (l1p->nt_mode) {
u_char oldstate = l1p->l1_state;
Write_hfc8(l1p->hw, R_ST_SEL,
l1p->st_num);
l1p->l1_state =
Read_hfc8(l1p->hw,
A_ST_RD_STA) & 0xf;
/* leaving G3: line went down */
if ((oldstate == 3)
&& (l1p->l1_state != 3))
l1p->d_if.ifc.l1l2(&l1p->
d_if.
ifc,
PH_DEACTIVATE
|
INDICATION,
NULL);
if (l1p->l1_state != 2) {
del_timer(&l1p->l1_timer);
if (l1p->l1_state == 3) {
l1p->d_if.ifc.
l1l2(&l1p->
d_if.ifc,
PH_ACTIVATE
|
INDICATION,
NULL);
}
} else {
/* allow transition */
Write_hfc8(hw, A_ST_WR_STA,
M_SET_G2_G3);
mod_timer(&l1p->l1_timer,
jiffies +
L1_TIMER_T1);
}
printk(KERN_INFO
"HFC-4S/8S: NT ch %d l1 state %d -> %d\n",
l1p->st_num, oldstate,
l1p->l1_state);
} else {
u_char oldstate = l1p->l1_state;
Write_hfc8(l1p->hw, R_ST_SEL,
l1p->st_num);
l1p->l1_state =
Read_hfc8(l1p->hw,
A_ST_RD_STA) & 0xf;
/* short T4 guard around transient F3/F8 states */
if (((l1p->l1_state == 3) &&
((oldstate == 7) ||
(oldstate == 8))) ||
((timer_pending
(&l1p->l1_timer))
&& (l1p->l1_state == 8))) {
mod_timer(&l1p->l1_timer,
L1_TIMER_T4 +
jiffies);
} else {
if (l1p->l1_state == 7) {
del_timer(&l1p->
l1_timer);
l1p->d_if.ifc.
l1l2(&l1p->
d_if.ifc,
PH_ACTIVATE
|
INDICATION,
NULL);
tx_d_frame(l1p);
}
if (l1p->l1_state == 3) {
if (oldstate != 3)
l1p->d_if.
ifc.
l1l2
(&l1p->
d_if.
ifc,
PH_DEACTIVATE
|
INDICATION,
NULL);
}
}
printk(KERN_INFO
"HFC-4S/8S: TE %d ch %d l1 state %d -> %d\n",
l1p->hw->cardnum,
l1p->st_num, oldstate,
l1p->l1_state);
}
}
}
b <<= 1;
l1p++;
}
/* now handle the fifos */
idx = 0;
fifo_stat = hw->mr.r_irq_fifo_blx;
l1p = hw->l1;
while (idx < hw->driver_data.max_st_ports) {
/* on a timer tick, fold in the polled transparent-rx and
   slow-service fifo masks as pseudo events */
if (hw->mr.timer_irq) {
*fifo_stat |= hw->mr.fifo_rx_trans_enables[idx];
if (hw->fifo_sched_cnt <= 0) {
*fifo_stat |=
hw->mr.fifo_slow_timer_service[l1p->
st_num];
}
}
/* ignore fifo 6 (TX E fifo) */
*fifo_stat &= 0xff - 0x40;
while (*fifo_stat) {
if (!l1p->nt_mode) {
/* RX Fifo has data to read */
if ((*fifo_stat & 0x20)) {
*fifo_stat &= ~0x20;
rx_d_frame(l1p, 0);
}
/* E Fifo has data to read */
if ((*fifo_stat & 0x80)) {
*fifo_stat &= ~0x80;
rx_d_frame(l1p, 1);
}
/* TX Fifo completed send */
if ((*fifo_stat & 0x10)) {
*fifo_stat &= ~0x10;
tx_d_frame(l1p);
}
}
/* B1 RX Fifo has data to read */
if ((*fifo_stat & 0x2)) {
*fifo_stat &= ~0x2;
rx_b_frame(l1p->b_ch);
}
/* B1 TX Fifo has send completed */
if ((*fifo_stat & 0x1)) {
*fifo_stat &= ~0x1;
tx_b_frame(l1p->b_ch);
}
/* B2 RX Fifo has data to read */
if ((*fifo_stat & 0x8)) {
*fifo_stat &= ~0x8;
rx_b_frame(l1p->b_ch + 1);
}
/* B2 TX Fifo has send completed */
if ((*fifo_stat & 0x4)) {
*fifo_stat &= ~0x4;
tx_b_frame(l1p->b_ch + 1);
}
}
fifo_stat++;
l1p++;
idx++;
}
/* re-arm the slow-service scheduling countdown */
if (hw->fifo_sched_cnt <= 0)
hw->fifo_sched_cnt += (1 << (7 - TRANS_TIMER_MODE));
hw->mr.timer_irq = 0; /* clear requested timer irq */
} /* hfc4s8s_bh */
/*********************/
/* interrupt handler */
/*********************/
/*
 * hfc4s8s_interrupt() - shared IRQ handler.
 * Latches the chip's state-change, timer and fifo event bits into the
 * software mirror (hw->mr) and defers all real work to hfc4s8s_bh().
 * Saves/restores the chip's register address latch so an interrupted
 * io-mapped access elsewhere is not corrupted.
 */
static irqreturn_t
hfc4s8s_interrupt(int intno, void *dev_id)
{
hfc4s8s_hw *hw = dev_id;
u_char b, ovr;
volatile u_char *ovp;
int idx;
u_char old_ioreg;
if (!hw || !(hw->mr.r_irq_ctrl & M_GLOB_IRQ_EN))
return IRQ_NONE;
#ifndef HISAX_HFC4S8S_PCIMEM
/* read current selected regsister */
old_ioreg = GetRegAddr(hw);
#endif
/* Layer 1 State change */
hw->mr.r_irq_statech |=
(Read_hfc8(hw, R_SCI) & hw->mr.r_irqmsk_statchg);
/* not our interrupt: no fifo/misc event and no state change pending */
if (!
(b = (Read_hfc8(hw, R_STATUS) & (M_MISC_IRQSTA | M_FR_IRQSTA)))
&& !hw->mr.r_irq_statech) {
#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(hw, old_ioreg);
#endif
return IRQ_NONE;
}
/* timer event */
if (Read_hfc8(hw, R_IRQ_MISC) & M_TI_IRQ) {
hw->mr.timer_irq = 1;
hw->fifo_sched_cnt--;
}
/* FIFO event */
if ((ovr = Read_hfc8(hw, R_IRQ_OVIEW))) {
hw->mr.r_irq_oview |= ovr;
/* each overview bit selects one R_IRQ_FIFO_BLx register */
idx = R_IRQ_FIFO_BL0;
ovp = hw->mr.r_irq_fifo_blx;
while (ovr) {
if ((ovr & 1)) {
*ovp |= Read_hfc8(hw, idx);
}
ovp++;
idx++;
ovr >>= 1;
}
}
/* queue the request to allow other cards to interrupt */
schedule_work(&hw->tqueue);
#ifndef HISAX_HFC4S8S_PCIMEM
SetRegAddr(hw, old_ioreg);
#endif
return IRQ_HANDLED;
} /* hfc4s8s_interrupt */
/***********************************************************************/
/* reset the complete chip, don't release the chips irq but disable it */
/***********************************************************************/
/*
 * Reset the complete chip and program its default operating mode
 * (master PCM, transmit fifo option, timer interval).  The irq line
 * stays allocated, but irq generation is left disabled
 * (hw->mr.r_irq_ctrl = 0) and all latched irq status is cleared.
 */
static void
chipreset(hfc4s8s_hw * hw)
{
	u_long flags;

	spin_lock_irqsave(&hw->lock, flags);
	Write_hfc8(hw, R_CTRL, 0);	/* use internal RAM */
	Write_hfc8(hw, R_RAM_MISC, 0);	/* 32k*8 RAM */
	Write_hfc8(hw, R_FIFO_MD, 0);	/* fifo mode 386 byte/fifo simple mode */
	Write_hfc8(hw, R_CIRM, M_SRES);	/* reset chip */
	hw->mr.r_irq_ctrl = 0;	/* interrupt is inactive */
	spin_unlock_irqrestore(&hw->lock, flags);

	udelay(3);	/* let the reset pulse settle before releasing it */
	Write_hfc8(hw, R_CIRM, 0);	/* disable reset */
	wait_busy(hw);	/* chip must come out of busy before reprogramming */

	Write_hfc8(hw, R_PCM_MD0, M_PCM_MD);	/* master mode */
	Write_hfc8(hw, R_RAM_MISC, M_FZ_MD);	/* transmit fifo option */
	if (hw->driver_data.clock_mode == 1)
		Write_hfc8(hw, R_BRG_PCM_CFG, M_PCM_CLK);	/* PCM clk / 2 */
	Write_hfc8(hw, R_TI_WD, TRANS_TIMER_MODE);	/* timer interval */

	/* forget every irq status latched before the reset */
	memset(&hw->mr, 0, sizeof(hw->mr));
}				/* chipreset */
/********************************************/
/* disable/enable hardware in nt or te mode */
/********************************************/
/*
 * Enable (enable != 0) or disable the card in NT or TE mode.
 *
 * Enable path: program irq masks and PWM, then per S/T port set the
 * clock delay and control registers, configure the D/E fifos (TE mode
 * only), register the port with the hisax core, and finally switch on
 * global irq generation.  Disable path: mask the global irq first,
 * then tear the ports down in reverse order and reset the chip.
 */
static void
hfc_hardware_enable(hfc4s8s_hw * hw, int enable, int nt_mode)
{
	u_long flags;
	char if_name[40];
	int i;

	if (enable) {
		/* save system vars */
		hw->nt_mode = nt_mode;

		/* enable fifo and state irqs, but not global irq enable */
		hw->mr.r_irq_ctrl = M_FIFO_IRQ;
		Write_hfc8(hw, R_IRQ_CTRL, hw->mr.r_irq_ctrl);
		hw->mr.r_irqmsk_statchg = 0;
		Write_hfc8(hw, R_SCI_MSK, hw->mr.r_irqmsk_statchg);
		Write_hfc8(hw, R_PWM_MD, 0x80);
		Write_hfc8(hw, R_PWM1, 26);
		if (!nt_mode)
			Write_hfc8(hw, R_ST_SYNC, M_AUTO_SYNC);

		/* enable the line interfaces and fifos */
		for (i = 0; i < hw->driver_data.max_st_ports; i++) {
			hw->mr.r_irqmsk_statchg |= (1 << i);
			Write_hfc8(hw, R_SCI_MSK, hw->mr.r_irqmsk_statchg);
			/* select port i; the following A_* writes target it */
			Write_hfc8(hw, R_ST_SEL, i);
			Write_hfc8(hw, A_ST_CLK_DLY,
				   ((nt_mode) ? CLKDEL_NT : CLKDEL_TE));
			hw->mr.r_ctrl0 = ((nt_mode) ? CTRL0_NT : CTRL0_TE);
			Write_hfc8(hw, A_ST_CTRL0, hw->mr.r_ctrl0);
			Write_hfc8(hw, A_ST_CTRL2, 3);
			Write_hfc8(hw, A_ST_WR_STA, 0);	/* enable state machine */

			hw->l1[i].enabled = 1;
			hw->l1[i].nt_mode = nt_mode;

			if (!nt_mode) {
				/* setup E-fifo */
				Write_hfc8(hw, R_FIFO, i * 8 + 7);	/* E fifo */
				wait_busy(hw);
				Write_hfc8(hw, A_CON_HDLC, 0x11);	/* HDLC mode, 1 fill, connect ST */
				Write_hfc8(hw, A_SUBCH_CFG, 2);	/* only 2 bits */
				Write_hfc8(hw, A_IRQ_MSK, 1);	/* enable interrupt */
				Write_hfc8(hw, A_INC_RES_FIFO, 2);	/* reset fifo */
				wait_busy(hw);

				/* setup D RX-fifo */
				Write_hfc8(hw, R_FIFO, i * 8 + 5);	/* RX fifo */
				wait_busy(hw);
				Write_hfc8(hw, A_CON_HDLC, 0x11);	/* HDLC mode, 1 fill, connect ST */
				Write_hfc8(hw, A_SUBCH_CFG, 2);	/* only 2 bits */
				Write_hfc8(hw, A_IRQ_MSK, 1);	/* enable interrupt */
				Write_hfc8(hw, A_INC_RES_FIFO, 2);	/* reset fifo */
				wait_busy(hw);

				/* setup D TX-fifo */
				Write_hfc8(hw, R_FIFO, i * 8 + 4);	/* TX fifo */
				wait_busy(hw);
				Write_hfc8(hw, A_CON_HDLC, 0x11);	/* HDLC mode, 1 fill, connect ST */
				Write_hfc8(hw, A_SUBCH_CFG, 2);	/* only 2 bits */
				Write_hfc8(hw, A_IRQ_MSK, 1);	/* enable interrupt */
				Write_hfc8(hw, A_INC_RES_FIFO, 2);	/* reset fifo */
				wait_busy(hw);
			}

			sprintf(if_name, "hfc4s8s_%d%d_", hw->cardnum, i);

			if (hisax_register
			    (&hw->l1[i].d_if, hw->l1[i].b_table, if_name,
			     ((nt_mode) ? 3 : 2))) {
				/* registration failed: mask this port again
				 * and stop enabling further ports */
				hw->l1[i].enabled = 0;
				hw->mr.r_irqmsk_statchg &= ~(1 << i);
				Write_hfc8(hw, R_SCI_MSK,
					   hw->mr.r_irqmsk_statchg);
				printk(KERN_INFO
				       "HFC-4S/8S: Unable to register S/T device %s, break\n",
				       if_name);
				break;
			}
		}
		spin_lock_irqsave(&hw->lock, flags);
		hw->mr.r_irq_ctrl |= M_GLOB_IRQ_EN;
		Write_hfc8(hw, R_IRQ_CTRL, hw->mr.r_irq_ctrl);
		spin_unlock_irqrestore(&hw->lock, flags);
	} else {
		/* disable hardware */
		spin_lock_irqsave(&hw->lock, flags);
		hw->mr.r_irq_ctrl &= ~M_GLOB_IRQ_EN;
		Write_hfc8(hw, R_IRQ_CTRL, hw->mr.r_irq_ctrl);
		spin_unlock_irqrestore(&hw->lock, flags);

		for (i = hw->driver_data.max_st_ports - 1; i >= 0; i--) {
			hw->l1[i].enabled = 0;
			hisax_unregister(&hw->l1[i].d_if);
			del_timer(&hw->l1[i].l1_timer);
			skb_queue_purge(&hw->l1[i].d_tx_queue);
			skb_queue_purge(&hw->l1[i].b_ch[0].tx_queue);
			skb_queue_purge(&hw->l1[i].b_ch[1].tx_queue);
		}
		chipreset(hw);
	}
}				/* hfc_hardware_enable */
/******************************************/
/* disable memory mapped ports / io ports */
/******************************************/
/*
 * Disable PCI command register access and give back the mapped memory
 * window (PCIMEM build) or the requested 8-byte I/O region.
 */
static void
release_pci_ports(hfc4s8s_hw * hw)
{
	pci_write_config_word(hw->pdev, PCI_COMMAND, 0);
#ifdef HISAX_HFC4S8S_PCIMEM
	if (hw->membase)
		iounmap((void *) hw->membase);
#else
	if (hw->iobase)
		release_region(hw->iobase, 8);
#endif
}
/*****************************************/
/* enable memory mapped ports / io ports */
/*****************************************/
/*
 * Enable access to the card's registers: memory space in the PCIMEM
 * build, I/O space otherwise.
 */
static void
enable_pci_ports(hfc4s8s_hw * hw)
{
#ifdef HISAX_HFC4S8S_PCIMEM
	u16 cmd = PCI_ENA_MEMIO;
#else
	u16 cmd = PCI_ENA_REGIO;
#endif
	pci_write_config_word(hw->pdev, PCI_COMMAND, cmd);
}
/*************************************/
/* initialise the HFC-4s/8s hardware */
/* return 0 on success. */
/*************************************/
/*
 * Initialise one HFC-4S/8S card: set up per-port software state,
 * verify the chip id and revision, hook the bottom half and the
 * shared irq, then enable the hardware in TE mode.
 *
 * Returns 0 on success.  On failure hw is freed here and the PCI
 * address space released, so the caller must not touch hw afterwards.
 */
static int __devinit
setup_instance(hfc4s8s_hw * hw)
{
	int err = -EIO;
	int i;

	/* software state for each possible S/T port and its B channels */
	for (i = 0; i < HFC_MAX_ST; i++) {
		struct hfc4s8s_l1 *l1p;

		l1p = hw->l1 + i;
		spin_lock_init(&l1p->lock);
		l1p->hw = hw;
		l1p->l1_timer.function = (void *) hfc_l1_timer;
		l1p->l1_timer.data = (long) (l1p);
		init_timer(&l1p->l1_timer);
		l1p->st_num = i;
		skb_queue_head_init(&l1p->d_tx_queue);
		/* D channel interface towards the hisax core */
		l1p->d_if.ifc.priv = hw->l1 + i;
		l1p->d_if.ifc.l2l1 = (void *) dch_l2l1;

		/* B1 channel */
		spin_lock_init(&l1p->b_ch[0].lock);
		l1p->b_ch[0].b_if.ifc.l2l1 = (void *) bch_l2l1;
		l1p->b_ch[0].b_if.ifc.priv = (void *) &l1p->b_ch[0];
		l1p->b_ch[0].l1p = hw->l1 + i;
		l1p->b_ch[0].bchan = 1;
		l1p->b_table[0] = &l1p->b_ch[0].b_if;
		skb_queue_head_init(&l1p->b_ch[0].tx_queue);

		/* B2 channel */
		spin_lock_init(&l1p->b_ch[1].lock);
		l1p->b_ch[1].b_if.ifc.l2l1 = (void *) bch_l2l1;
		l1p->b_ch[1].b_if.ifc.priv = (void *) &l1p->b_ch[1];
		l1p->b_ch[1].l1p = hw->l1 + i;
		l1p->b_ch[1].bchan = 2;
		l1p->b_table[1] = &l1p->b_ch[1].b_if;
		skb_queue_head_init(&l1p->b_ch[1].tx_queue);
	}

	enable_pci_ports(hw);
	chipreset(hw);

	/* make sure we are really driving the chip this entry claims */
	i = Read_hfc8(hw, R_CHIP_ID) >> CHIP_ID_SHIFT;
	if (i != hw->driver_data.chip_id) {
		printk(KERN_INFO
		       "HFC-4S/8S: invalid chip id 0x%x instead of 0x%x, card ignored\n",
		       i, hw->driver_data.chip_id);
		goto out;
	}

	i = Read_hfc8(hw, R_CHIP_RV) & 0xf;
	if (!i) {
		printk(KERN_INFO
		       "HFC-4S/8S: chip revision 0 not supported, card ignored\n");
		goto out;
	}

	/* bottom half must exist before the irq can fire */
	INIT_WORK(&hw->tqueue, hfc4s8s_bh);

	if (request_irq
	    (hw->irq, hfc4s8s_interrupt, IRQF_SHARED, hw->card_name, hw)) {
		printk(KERN_INFO
		       "HFC-4S/8S: unable to alloc irq %d, card ignored\n",
		       hw->irq);
		goto out;
	}
#ifdef HISAX_HFC4S8S_PCIMEM
	printk(KERN_INFO
	       "HFC-4S/8S: found PCI card at membase 0x%p, irq %d\n",
	       hw->hw_membase, hw->irq);
#else
	printk(KERN_INFO
	       "HFC-4S/8S: found PCI card at iobase 0x%x, irq %d\n",
	       hw->iobase, hw->irq);
#endif

	hfc_hardware_enable(hw, 1, 0);

	return (0);

      out:
	hw->irq = 0;
	release_pci_ports(hw);
	kfree(hw);
	return (err);
}
/*****************************************/
/* PCI hotplug interface: probe new card */
/*****************************************/
/*
 * PCI hotplug probe: allocate the per-card state, enable the device,
 * claim its address space and run setup_instance().
 *
 * Bug fixes vs. the original:
 *  - when request_region() failed, err was still 0 (from the earlier
 *    successful pci_enable_device()), so probe reported success while
 *    hw had already been freed; now returns -EBUSY.
 *  - pci_enable_device() was never undone on any failure path; now
 *    balanced by pci_disable_device().
 *  - ioremap() result is checked in the PCIMEM build.
 *  - typo "rquest" in the error message.
 *
 * Note: on setup_instance() failure that function frees hw itself,
 * so we must not free it again here.
 */
static int __devinit
hfc4s8s_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = -ENOMEM;
	hfc4s8s_param *driver_data = (hfc4s8s_param *) ent->driver_data;
	hfc4s8s_hw *hw;

	if (!(hw = kzalloc(sizeof(hfc4s8s_hw), GFP_ATOMIC))) {
		printk(KERN_ERR "No kmem for HFC-4S/8S card\n");
		return (err);
	}
	hw->pdev = pdev;
	err = pci_enable_device(pdev);
	if (err)
		goto out_free;

	hw->cardnum = card_cnt;
	sprintf(hw->card_name, "hfc4s8s_%d", hw->cardnum);
	printk(KERN_INFO "HFC-4S/8S: found adapter %s (%s) at %s\n",
	       driver_data->device_name, hw->card_name, pci_name(pdev));

	spin_lock_init(&hw->lock);

	hw->driver_data = *driver_data;
	hw->irq = pdev->irq;
	hw->iobase = pci_resource_start(pdev, 0);

#ifdef HISAX_HFC4S8S_PCIMEM
	hw->hw_membase = (u_char *) pci_resource_start(pdev, 1);
	hw->membase = ioremap((ulong) hw->hw_membase, 256);
	if (!hw->membase) {
		printk(KERN_INFO
		       "HFC-4S/8S: failed to remap memory at 0x%p\n",
		       hw->hw_membase);
		err = -ENOMEM;
		goto out_disable;
	}
#else
	if (!request_region(hw->iobase, 8, hw->card_name)) {
		printk(KERN_INFO
		       "HFC-4S/8S: failed to request address space at 0x%04x\n",
		       hw->iobase);
		err = -EBUSY;
		goto out_disable;
	}
#endif

	pci_set_drvdata(pdev, hw);
	err = setup_instance(hw);	/* frees hw itself on failure */
	if (!err)
		card_cnt++;
	return (err);

      out_disable:
	pci_disable_device(pdev);
      out_free:
	kfree(hw);
	return (err);
}
/**************************************/
/* PCI hotplug interface: remove card */
/**************************************/
/*
 * PCI hotplug remove: shut the hardware down, release irq and address
 * space, then free the per-card state.
 */
static void __devexit
hfc4s8s_remove(struct pci_dev *pdev)
{
	hfc4s8s_hw *hw = pci_get_drvdata(pdev);

	printk(KERN_INFO "HFC-4S/8S: removing card %d\n", hw->cardnum);

	/* quiesce the chip before the irq goes away */
	hfc_hardware_enable(hw, 0, 0);
	if (hw->irq)
		free_irq(hw->irq, hw);
	hw->irq = 0;

	release_pci_ports(hw);
	card_cnt--;
	pci_disable_device(pdev);
	kfree(hw);
}
/* PCI driver glue; hfc4s8s_ids is defined earlier in this file. */
static struct pci_driver hfc4s8s_driver = {
	.name = "hfc4s8s_l1",
	.probe = hfc4s8s_probe,
	.remove = __devexit_p(hfc4s8s_remove),
	.id_table = hfc4s8s_ids,
};
/**********************/
/* driver Module init */
/**********************/
/*
 * Module init: announce the driver and register it with the PCI core.
 * card_cnt is incremented by each successful probe.
 *
 * NOTE(review): the !CONFIG_HOTPLUG block assumes the historical
 * pci_register_driver() semantics where the return value was the
 * number of devices bound; on kernels where it returns 0 on success
 * this path would always unregister and return -ENODEV — confirm
 * against the target kernel version.
 */
static int __init
hfc4s8s_module_init(void)
{
	int err;

	printk(KERN_INFO
	       "HFC-4S/8S: Layer 1 driver module for HFC-4S/8S isdn chips, %s\n",
	       hfc4s8s_rev);
	printk(KERN_INFO
	       "HFC-4S/8S: (C) 2003 Cornelius Consult, www.cornelius-consult.de\n");

	card_cnt = 0;

	err = pci_register_driver(&hfc4s8s_driver);
	if (err < 0) {
		goto out;
	}
	printk(KERN_INFO "HFC-4S/8S: found %d cards\n", card_cnt);

#if !defined(CONFIG_HOTPLUG)
	if (err == 0) {
		err = -ENODEV;
		pci_unregister_driver(&hfc4s8s_driver);
		goto out;
	}
#endif

	return 0;

      out:
	return (err);
}				/* hfc4s8s_init_hw */
/*************************************/
/* driver module exit : */
/* release the HFC-4s/8s hardware */
/*************************************/
/*
 * Module exit: unregistering the driver unbinds every card, and
 * hfc4s8s_remove() performs the per-card teardown.
 */
static void __exit
hfc4s8s_module_exit(void)
{
	pci_unregister_driver(&hfc4s8s_driver);
	printk(KERN_INFO "HFC-4S/8S: module removed\n");
}				/* hfc4s8s_release_hw */
module_init(hfc4s8s_module_init);
module_exit(hfc4s8s_module_exit);
| gpl-2.0 |
Fred6681/android_kernel_samsung_golden | drivers/media/dvb/ttusb-dec/ttusbdecfe.c | 4955 | 7875 | /*
* TTUSB DEC Frontend Driver
*
* Copyright (C) 2003-2004 Alex Woods <linux-dvb@giblets.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include "dvb_frontend.h"
#include "ttusbdecfe.h"
#define LOF_HI 10600000
#define LOF_LO 9750000
/*
 * Per-frontend driver state.  Allocated by the attach functions,
 * which return &frontend; freed again in ttusbdecfe_release().
 */
struct ttusbdecfe_state {

	/* configuration settings */
	const struct ttusbdecfe_config* config;

	/* the frontend object handed to the dvb core */
	struct dvb_frontend frontend;

	/* nonzero after SEC_TONE_ON: tune relative to LOF_HI (DVB-S only) */
	u8 hi_band;
	/* last LNB voltage selected, in volts (13 or 18; DVB-S only) */
	u8 voltage;
};
/*
 * DVB-S status callback.  The hardware gives no usable lock
 * information here, so a fully locked status is always reported.
 */
static int ttusbdecfe_dvbs_read_status(struct dvb_frontend *fe,
				       fe_status_t *status)
{
	*status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
		  FE_HAS_SYNC | FE_HAS_LOCK;
	return 0;
}
/*
 * DVB-T status callback: issue command 0x73 and map the fourth reply
 * byte onto the dvb-core status bits.
 */
static int ttusbdecfe_dvbt_read_status(struct dvb_frontend *fe,
				       fe_status_t *status)
{
	struct ttusbdecfe_state *state = fe->demodulator_priv;
	u8 cmd[8] = { 0 };
	u8 reply[4];
	int reply_len, ret;

	*status = 0;

	ret = state->config->send_command(fe, 0x73, sizeof(cmd), cmd,
					  &reply_len, reply);
	if (ret)
		return ret;

	if (reply_len != 4) {
		printk(KERN_ERR "%s: unexpected reply\n", __func__);
		return -EIO;
	}

	switch (reply[3]) {
	case 1:		/* not tuned yet */
	case 2:		/* no signal/no lock*/
		break;
	case 3:		/* signal found and locked*/
		*status = FE_HAS_SIGNAL | FE_HAS_VITERBI |
			  FE_HAS_SYNC | FE_HAS_CARRIER | FE_HAS_LOCK;
		break;
	case 4:
		*status = FE_TIMEDOUT;
		break;
	default:
		pr_info("%s: returned unknown value: %d\n",
			__func__, reply[3]);
		return -EIO;
	}

	return 0;
}
/*
 * DVB-T tune request: command 0x71 carries the frequency (in kHz,
 * big-endian) at offset 4 of a fixed command template.
 */
static int ttusbdecfe_dvbt_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
{
	struct ttusbdecfe_state *state = fe->demodulator_priv;
	__be32 freq_be = htonl(p->frequency / 1000);
	u8 cmd[] = { 0x00, 0x00, 0x00, 0x03,
		     0x00, 0x00, 0x00, 0x00,
		     0x00, 0x00, 0x00, 0x01,
		     0x00, 0x00, 0x00, 0xff,
		     0x00, 0x00, 0x00, 0xff };

	memcpy(&cmd[4], &freq_be, sizeof(u32));

	state->config->send_command(fe, 0x71, sizeof(cmd), cmd, NULL, NULL);

	return 0;
}
/*
 * Tuning settings for DVB-T: a generous settle delay and no zig-zag
 * scanning at all.
 */
static int ttusbdecfe_dvbt_get_tune_settings(struct dvb_frontend* fe,
					struct dvb_frontend_tune_settings* fesettings)
{
	/* Drift compensation makes no sense for DVB-T */
	fesettings->step_size = 0;
	fesettings->max_drift = 0;
	fesettings->min_delay_ms = 1500;
	return 0;
}
static int ttusbdecfe_dvbs_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
{
struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv;
u8 b[] = { 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00 };
__be32 freq;
__be32 sym_rate;
__be32 band;
__be32 lnb_voltage;
freq = htonl(p->frequency +
(state->hi_band ? LOF_HI : LOF_LO));
memcpy(&b[4], &freq, sizeof(u32));
sym_rate = htonl(p->u.qam.symbol_rate);
memcpy(&b[12], &sym_rate, sizeof(u32));
band = htonl(state->hi_band ? LOF_HI : LOF_LO);
memcpy(&b[24], &band, sizeof(u32));
lnb_voltage = htonl(state->voltage);
memcpy(&b[28], &lnb_voltage, sizeof(u32));
state->config->send_command(fe, 0x71, sizeof(b), b, NULL, NULL);
return 0;
}
static int ttusbdecfe_dvbs_diseqc_send_master_cmd(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd *cmd)
{
struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv;
u8 b[] = { 0x00, 0xff, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00 };
memcpy(&b[4], cmd->msg, cmd->msg_len);
state->config->send_command(fe, 0x72,
sizeof(b) - (6 - cmd->msg_len), b,
NULL, NULL);
return 0;
}
/*
 * Remember the 22 kHz tone setting; a tone selects the high LNB band
 * used by the next set_frontend call.
 */
static int ttusbdecfe_dvbs_set_tone(struct dvb_frontend* fe, fe_sec_tone_mode_t tone)
{
	struct ttusbdecfe_state *state = fe->demodulator_priv;

	state->hi_band = (tone == SEC_TONE_ON);

	return 0;
}
/*
 * Remember the LNB supply voltage (13 V or 18 V) for the next
 * set_frontend call; any other selection is rejected.
 */
static int ttusbdecfe_dvbs_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage)
{
	struct ttusbdecfe_state *state = fe->demodulator_priv;

	if (voltage == SEC_VOLTAGE_13)
		state->voltage = 13;
	else if (voltage == SEC_VOLTAGE_18)
		state->voltage = 18;
	else
		return -EINVAL;

	return 0;
}
/* Free the state allocated by the attach functions. */
static void ttusbdecfe_release(struct dvb_frontend* fe)
{
	kfree(fe->demodulator_priv);
}
static struct dvb_frontend_ops ttusbdecfe_dvbt_ops;
/*
 * Allocate and initialise a DVB-T frontend; returns NULL on OOM.
 *
 * Fix: use kzalloc instead of kmalloc so every field of the embedded
 * dvb_frontend (and hi_band/voltage) that is not explicitly set below
 * starts out zeroed rather than containing heap garbage.
 */
struct dvb_frontend* ttusbdecfe_dvbt_attach(const struct ttusbdecfe_config* config)
{
	struct ttusbdecfe_state* state = NULL;

	/* allocate memory for the internal state */
	state = kzalloc(sizeof(struct ttusbdecfe_state), GFP_KERNEL);
	if (state == NULL)
		return NULL;

	/* setup the state */
	state->config = config;

	/* create dvb_frontend */
	memcpy(&state->frontend.ops, &ttusbdecfe_dvbt_ops, sizeof(struct dvb_frontend_ops));
	state->frontend.demodulator_priv = state;
	return &state->frontend;
}
static struct dvb_frontend_ops ttusbdecfe_dvbs_ops;
/*
 * Allocate and initialise a DVB-S frontend; returns NULL on OOM.
 *
 * Fix: use kzalloc instead of kmalloc so every field of the embedded
 * dvb_frontend that is not explicitly set below starts out zeroed;
 * this also covers the voltage/hi_band defaults.
 */
struct dvb_frontend* ttusbdecfe_dvbs_attach(const struct ttusbdecfe_config* config)
{
	struct ttusbdecfe_state* state = NULL;

	/* allocate memory for the internal state */
	state = kzalloc(sizeof(struct ttusbdecfe_state), GFP_KERNEL);
	if (state == NULL)
		return NULL;

	/* setup the state */
	state->config = config;
	state->voltage = 0;	/* redundant with kzalloc, kept for clarity */
	state->hi_band = 0;

	/* create dvb_frontend */
	memcpy(&state->frontend.ops, &ttusbdecfe_dvbs_ops, sizeof(struct dvb_frontend_ops));
	state->frontend.demodulator_priv = state;
	return &state->frontend;
}
/* Frontend operations for the DEC2000-t (DVB-T) variant. */
static struct dvb_frontend_ops ttusbdecfe_dvbt_ops = {

	.info = {
		.name			= "TechnoTrend/Hauppauge DEC2000-t Frontend",
		.type			= FE_OFDM,
		.frequency_min		= 51000000,
		.frequency_max		= 858000000,
		.frequency_stepsize	= 62500,
		.caps =	FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
			FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
			FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
			FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO |
			FE_CAN_HIERARCHY_AUTO,
	},

	.release = ttusbdecfe_release,

	.set_frontend = ttusbdecfe_dvbt_set_frontend,

	.get_tune_settings = ttusbdecfe_dvbt_get_tune_settings,

	.read_status = ttusbdecfe_dvbt_read_status,
};
/* Frontend operations for the DEC3000-s (DVB-S) variant. */
static struct dvb_frontend_ops ttusbdecfe_dvbs_ops = {

	.info = {
		.name			= "TechnoTrend/Hauppauge DEC3000-s Frontend",
		.type			= FE_QPSK,
		.frequency_min		= 950000,
		.frequency_max		= 2150000,
		.frequency_stepsize	= 125,
		.symbol_rate_min	= 1000000,  /* guessed */
		.symbol_rate_max	= 45000000, /* guessed */
		.caps =	FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
			FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
			FE_CAN_QPSK
	},

	.release = ttusbdecfe_release,

	.set_frontend = ttusbdecfe_dvbs_set_frontend,

	.read_status = ttusbdecfe_dvbs_read_status,

	.diseqc_send_master_cmd = ttusbdecfe_dvbs_diseqc_send_master_cmd,

	.set_voltage = ttusbdecfe_dvbs_set_voltage,

	.set_tone = ttusbdecfe_dvbs_set_tone,
};
MODULE_DESCRIPTION("TTUSB DEC DVB-T/S Demodulator driver");
MODULE_AUTHOR("Alex Woods/Andrew de Quincey");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ttusbdecfe_dvbt_attach);
EXPORT_SYMBOL(ttusbdecfe_dvbs_attach);
| gpl-2.0 |
LegacyHuawei/android_kernel_huawei_msm7x30 | arch/powerpc/platforms/44x/warp.c | 7003 | 6627 | /*
* PIKA Warp(tm) board specific routines
*
* Copyright (c) 2008-2009 PIKA Technologies
* Sean MacLennan <smaclennan@pikatech.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/init.h>
#include <linux/of_platform.h>
#include <linux/kthread.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/of_gpio.h>
#include <linux/of_i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
#include <asm/ppc4xx.h>
/* Device-tree bus types whose children become platform devices. */
static __initdata struct of_device_id warp_of_bus[] = {
	{ .compatible = "ibm,plb4", },
	{ .compatible = "ibm,opb", },
	{ .compatible = "ibm,ebc", },
	{},	/* sentinel */
};
/* Populate platform devices for everything below the warp_of_bus buses. */
static int __init warp_device_probe(void)
{
	/* NULL root: start the search at the top of the device tree */
	of_platform_bus_probe(NULL, warp_of_bus, NULL);
	return 0;
}
machine_device_initcall(warp, warp_device_probe);
/*
 * Platform probe: claim the machine when the flattened device tree
 * root is compatible with "pika,warp".
 */
static int __init warp_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	if (!of_flat_dt_is_compatible(root, "pika,warp"))
		return 0;

	/* For __dma_alloc_coherent */
	ISA_DMA_THRESHOLD = ~0L;

	return 1;
}
/* Machine description hooks for the PIKA Warp board. */
define_machine(warp) {
	.name		= "Warp",
	.probe		= warp_probe,
	.progress	= udbg_progress,
	.init_IRQ	= uic_init_tree,
	.get_irq	= uic_get_irq,
	.restart	= ppc4xx_reset_system,
	.calibrate_decr	= generic_calibrate_decr,
};
/*
 * Report the power-on self test result codes kept by the FPGA in the
 * sd register area (offsets 0x40/0x44); both zero means POST passed.
 */
static int __init warp_post_info(void)
{
	struct device_node *np;
	void __iomem *regs;
	u32 code1, code2;

	/* Sighhhh... POST information is in the sd area. */
	np = of_find_compatible_node(NULL, NULL, "pika,fpga-sd");
	if (!np)
		return -ENOENT;

	regs = of_iomap(np, 0);
	of_node_put(np);
	if (!regs)
		return -ENOENT;

	code1 = in_be32(regs + 0x40);
	code2 = in_be32(regs + 0x44);

	iounmap(regs);

	if (code1 == 0 && code2 == 0)
		printk(KERN_INFO "Warp POST OK\n");
	else
		printk(KERN_INFO "Warp POST %08x %08x\n", code1, code2);

	return 0;
}
#ifdef CONFIG_SENSORS_AD7414

/* Callbacks run (with irqs off) from the critical-temperature
 * shutdown path in temp_isr(). */
static LIST_HEAD(dtm_shutdown_list);
/* FPGA register window; set by pika_dtm_start(), checked in temp_isr(). */
static void __iomem *dtm_fpga;
/* GPIO numbers resolved from the "gpio-leds" device-tree node. */
static unsigned green_led, red_led;

/* One registered shutdown callback. */
struct dtm_shutdown {
	struct list_head list;
	void (*func)(void *arg);	/* callback to invoke */
	void *arg;			/* opaque argument handed back to func */
};
/*
 * Register a callback to run before the critical-temperature
 * shutdown loop.  Returns 0 or -ENOMEM.
 */
int pika_dtm_register_shutdown(void (*func)(void *arg), void *arg)
{
	struct dtm_shutdown *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->func = func;
	entry->arg = arg;
	list_add(&entry->list, &dtm_shutdown_list);

	return 0;
}
int pika_dtm_unregister_shutdown(void (*func)(void *arg), void *arg)
{
struct dtm_shutdown *shutdown;
list_for_each_entry(shutdown, &dtm_shutdown_list, list)
if (shutdown->func == func && shutdown->arg == arg) {
list_del(&shutdown->list);
kfree(shutdown);
return 0;
}
return -EINVAL;
}
/*
 * Critical-temperature alert from the AD7414.  Turns the green LED
 * off, runs every registered shutdown callback, then spins forever
 * with interrupts disabled, writing the FPGA register at 0x14 back to
 * itself and blinking the red LED.  This handler never returns.
 */
static irqreturn_t temp_isr(int irq, void *context)
{
	struct dtm_shutdown *shutdown;
	int value = 1;

	local_irq_disable();

	gpio_set_value(green_led, 0);

	/* Run through the shutdown list. */
	list_for_each_entry(shutdown, &dtm_shutdown_list, list)
		shutdown->func(shutdown->arg);

	printk(KERN_EMERG "\n\nCritical Temperature Shutdown\n\n");

	while (1) {
		if (dtm_fpga) {
			/* read-modify-write of the FPGA reset register;
			 * presumably keeps a watchdog-style latch alive —
			 * TODO confirm against the FPGA register map */
			unsigned reset = in_be32(dtm_fpga + 0x14);
			out_be32(dtm_fpga + 0x14, reset);
		}

		/* blink the red LED at ~1 Hz */
		gpio_set_value(red_led, value);
		value ^= 1;
		mdelay(500);
	}

	/* Not reached */
	return IRQ_HANDLED;
}
static int pika_setup_leds(void)
{
struct device_node *np, *child;
np = of_find_compatible_node(NULL, NULL, "gpio-leds");
if (!np) {
printk(KERN_ERR __FILE__ ": Unable to find leds\n");
return -ENOENT;
}
for_each_child_of_node(np, child)
if (strcmp(child->name, "green") == 0)
green_led = of_get_gpio(child, 0);
else if (strcmp(child->name, "red") == 0)
red_led = of_get_gpio(child, 0);
of_node_put(np);
return 0;
}
/*
 * Program the AD7414 alert thresholds (Thigh = 65 °C, Tlow = 0 °C)
 * and hook its alert interrupt to temp_isr().  Failures are logged
 * but not propagated — the DTM thread keeps running regardless.
 */
static void pika_setup_critical_temp(struct device_node *np,
				     struct i2c_client *client)
{
	int irq, rc;

	/* Do this before enabling critical temp interrupt since we
	 * may immediately interrupt.
	 */
	pika_setup_leds();

	/* These registers are in 1 degree increments. */
	i2c_smbus_write_byte_data(client, 2, 65); /* Thigh */
	i2c_smbus_write_byte_data(client, 3,  0); /* Tlow */

	irq = irq_of_parse_and_map(np, 0);
	if (irq  == NO_IRQ) {
		printk(KERN_ERR __FILE__ ": Unable to get ad7414 irq\n");
		return;
	}

	rc = request_irq(irq, temp_isr, 0, "ad7414", NULL);
	if (rc) {
		printk(KERN_ERR __FILE__
		       ": Unable to request ad7414 irq %d = %d\n", irq, rc);
		return;
	}
}
/*
 * Sample the fan status bit (FPGA offset 0x34, bit 14) and warn once
 * per transition into the error state.
 */
static inline void pika_dtm_check_fan(void __iomem *fpga)
{
	static int fan_state;
	u32 fan = in_be32(fpga + 0x34) & (1 << 14);

	if (fan == fan_state)
		return;		/* no change since the last poll */

	fan_state = fan;
	if (fan)
		printk(KERN_WARNING "Fan rotation error detected."
		       " Please check hardware.\n");
}
/*
 * DTM kernel thread: once a second read the AD7414 temperature over
 * SMBus, mirror it into the FPGA register at offset 0x20 and poll the
 * fan status.  Runs until kthread_stop().
 */
static int pika_dtm_thread(void __iomem *fpga)
{
	struct device_node *np;
	struct i2c_client *client;

	np = of_find_compatible_node(NULL, NULL, "adi,ad7414");
	if (np == NULL)
		return -ENOENT;

	client = of_find_i2c_device_by_node(np);
	if (client == NULL) {
		of_node_put(np);
		return -ENOENT;
	}

	pika_setup_critical_temp(np, client);

	of_node_put(np);

	printk(KERN_INFO "Warp DTM thread running.\n");

	while (!kthread_should_stop()) {
		int val;

		val = i2c_smbus_read_word_data(client, 0);
		if (val < 0)
			dev_dbg(&client->dev, "DTM read temp failed.\n");
		else {
			/* swap the SMBus little-endian word before
			 * handing it to the big-endian FPGA register */
			s16 temp = swab16(val);
			out_be32(fpga + 0x20, temp);
		}

		pika_dtm_check_fan(fpga);

		/* ~1 s poll interval */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	return 0;
}
/*
 * Map the FPGA register window, report POST results and start the DTM
 * polling thread.
 *
 * Fix: when kthread_run() fails, the global dtm_fpga was left
 * pointing at the just-unmapped window; temp_isr() tests dtm_fpga, so
 * clear it before unmapping to rule out any use of a dangling
 * pointer.
 */
static int __init pika_dtm_start(void)
{
	struct task_struct *dtm_thread;
	struct device_node *np;
	void __iomem *fpga;

	np = of_find_compatible_node(NULL, NULL, "pika,fpga");
	if (np == NULL)
		return -ENOENT;

	dtm_fpga = of_iomap(np, 0);
	of_node_put(np);
	if (dtm_fpga == NULL)
		return -ENOENT;

	/* Must get post info before thread starts. */
	warp_post_info();

	dtm_thread = kthread_run(pika_dtm_thread, dtm_fpga, "pika-dtm");
	if (IS_ERR(dtm_thread)) {
		/* clear the global first so temp_isr() never sees a
		 * dangling mapping */
		fpga = dtm_fpga;
		dtm_fpga = NULL;
		iounmap(fpga);
		return PTR_ERR(dtm_thread);
	}

	return 0;
}
#else /* !CONFIG_SENSORS_AD7414 */

/* Without the AD7414 sensor driver there is no DTM; the shutdown
 * registration API degenerates to no-ops so callers need no #ifdefs. */
int pika_dtm_register_shutdown(void (*func)(void *arg), void *arg)
{
	return 0;
}

int pika_dtm_unregister_shutdown(void (*func)(void *arg), void *arg)
{
	return 0;
}

/* Still report the POST results even without the DTM thread. */
machine_late_initcall(warp, warp_post_info);

#endif

EXPORT_SYMBOL(pika_dtm_register_shutdown);
EXPORT_SYMBOL(pika_dtm_unregister_shutdown);
| gpl-2.0 |
kennysgithub/sm-p607t-kernel | drivers/pci/pci-label.c | 7515 | 8429 | /*
* Purpose: Export the firmware instance and label associated with
* a pci device to sysfs
* Copyright (C) 2010 Dell Inc.
* by Narendra K <Narendra_K@dell.com>,
* Jordan Hargrave <Jordan_Hargrave@dell.com>
*
* PCI Firmware Specification Revision 3.1 section 4.6.7 (DSM for Naming a
* PCI or PCI Express Device Under Operating Systems) defines an instance
* number and string name. This code retrieves them and exports them to sysfs.
* If the system firmware does not provide the ACPI _DSM (Device Specific
* Method), then the SMBIOS type 41 instance number and string is exported to
* sysfs.
*
* SMBIOS defines type 41 for onboard pci devices. This code retrieves
* the instance number and string from the type 41 record and exports
* it to sysfs.
*
* Please see http://linux.dell.com/wiki/index.php/Oss/libnetdevname for more
* information.
*/
#include <linux/dmi.h>
#include <linux/sysfs.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/nls.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <acpi/acpi_bus.h>
#include "pci.h"
#define DEVICE_LABEL_DSM 0x07
#ifndef CONFIG_DMI

/* Without DMI there is no SMBIOS type 41 data; stub the helpers out
 * so the common entry points need no #ifdefs. */
static inline int
pci_create_smbiosname_file(struct pci_dev *pdev)
{
	return -1;
}

static inline void
pci_remove_smbiosname_file(struct pci_dev *pdev)
{
}

#else
/* Which datum find_smbios_instance_string() should format into buf. */
enum smbios_attr_enum {
	SMBIOS_ATTR_NONE = 0,		/* only test that a record exists */
	SMBIOS_ATTR_LABEL_SHOW,		/* the onboard-device label string */
	SMBIOS_ATTR_INSTANCE_SHOW,	/* the instance number */
};
/*
 * Look up the SMBIOS type 41 (onboard device) record matching pdev's
 * bus/devfn.  When found: with buf == NULL return the label length;
 * otherwise format the requested attribute into buf and return the
 * number of characters written.  Returns 0 when no record matches.
 */
static size_t
find_smbios_instance_string(struct pci_dev *pdev, char *buf,
			    enum smbios_attr_enum attribute)
{
	const struct dmi_device *dmi = NULL;
	struct dmi_dev_onboard *donboard;
	int bus = pdev->bus->number;
	int devfn = pdev->devfn;

	while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD,
				      NULL, dmi)) != NULL) {
		donboard = dmi->device_data;
		if (!donboard || donboard->bus != bus ||
		    donboard->devfn != devfn)
			continue;

		if (!buf)
			return strlen(dmi->name);

		if (attribute == SMBIOS_ATTR_INSTANCE_SHOW)
			return scnprintf(buf, PAGE_SIZE, "%d\n",
					 donboard->instance);
		if (attribute == SMBIOS_ATTR_LABEL_SHOW)
			return scnprintf(buf, PAGE_SIZE, "%s\n",
					 dmi->name);

		return strlen(dmi->name);
	}
	return 0;
}
/*
 * sysfs is_visible hook: expose the attributes (read-only) only when
 * SMBIOS carries a type 41 record for this device.
 */
static umode_t
smbios_instance_string_exist(struct kobject *kobj, struct attribute *attr,
			     int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (find_smbios_instance_string(pdev, NULL, SMBIOS_ATTR_NONE))
		return S_IRUGO;
	return 0;
}
/* sysfs show: the SMBIOS onboard-device label string. */
static ssize_t
smbioslabel_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return find_smbios_instance_string(to_pci_dev(dev), buf,
					   SMBIOS_ATTR_LABEL_SHOW);
}
/* sysfs show: the SMBIOS onboard-device instance number. */
static ssize_t
smbiosinstance_show(struct device *dev,
		    struct device_attribute *attr, char *buf)
{
	return find_smbios_instance_string(to_pci_dev(dev), buf,
					   SMBIOS_ATTR_INSTANCE_SHOW);
}
/* sysfs "label" attribute, backed by the SMBIOS type 41 string. */
static struct device_attribute smbios_attr_label = {
	.attr = {.name = "label", .mode = 0444},
	.show = smbioslabel_show,
};

/* sysfs "index" attribute, backed by the SMBIOS instance number. */
static struct device_attribute smbios_attr_instance = {
	.attr = {.name = "index", .mode = 0444},
	.show = smbiosinstance_show,
};

static struct attribute *smbios_attributes[] = {
	&smbios_attr_label.attr,
	&smbios_attr_instance.attr,
	NULL,
};

/* is_visible hides both attributes when SMBIOS has no record. */
static struct attribute_group smbios_attr_group = {
	.attrs = smbios_attributes,
	.is_visible = smbios_instance_string_exist,
};
/* Create the SMBIOS label/index attribute group for this device. */
static int
pci_create_smbiosname_file(struct pci_dev *pdev)
{
	return sysfs_create_group(&pdev->dev.kobj, &smbios_attr_group);
}
/* Remove the SMBIOS label/index attribute group again. */
static void
pci_remove_smbiosname_file(struct pci_dev *pdev)
{
	sysfs_remove_group(&pdev->dev.kobj, &smbios_attr_group);
}
#endif

#ifndef CONFIG_ACPI

/* Without ACPI there is no _DSM; stub everything out so the common
 * entry points fall back to the SMBIOS path. */
static inline int
pci_create_acpi_index_label_files(struct pci_dev *pdev)
{
	return -1;
}

static inline int
pci_remove_acpi_index_label_files(struct pci_dev *pdev)
{
	return -1;
}

static inline bool
device_has_dsm(struct device *dev)
{
	return false;
}

#else
/*
 * _DSM UUID e5c937d0-3553-4d7a-9117-ea4d19c3434d in the mixed-endian
 * byte order ACPI expects (first three fields little-endian).
 */
static const char device_label_dsm_uuid[] = {
	0xD0, 0x37, 0xC9, 0xE5, 0x53, 0x35, 0x7A, 0x4D,
	0x91, 0x17, 0xEA, 0x4D, 0x19, 0xC3, 0x43, 0x4D
};

/* Which datum dsm_get_label() should format into buf. */
enum acpi_attr_enum {
	ACPI_ATTR_NONE = 0,	/* only test whether the _DSM answers */
	ACPI_ATTR_LABEL_SHOW,	/* the device name string */
	ACPI_ATTR_INDEX_SHOW,	/* the instance number */
};
/*
 * Convert the UTF-16LE _DSM label (package element 1) to UTF-8 in buf
 * and append a newline.
 *
 * Fix: the original passed PAGE_SIZE as the output limit and then
 * wrote buf[len] = '\n' — one byte past the buffer when the
 * conversion filled the whole page — and never NUL-terminated buf
 * even though dsm_get_label() calls strlen(buf) on it.  Reserve two
 * bytes for the '\n' and the terminator.
 */
static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf)
{
	int len;
	len = utf16s_to_utf8s((const wchar_t *)obj->
			      package.elements[1].string.pointer,
			      obj->package.elements[1].string.length,
			      UTF16_LITTLE_ENDIAN,
			      buf, PAGE_SIZE - 2);
	buf[len++] = '\n';
	buf[len] = '\0';
}
/*
 * Evaluate the device's _DSM (function `func`, revision 2) and decode
 * the reply, which is expected to be a two-element package of
 * { instance number, label string }.
 *
 * With buf == NULL the instance number is returned (as a length-style
 * probe); otherwise the requested attribute is formatted into buf and
 * strlen(buf) is returned.  Returns -1 on any failure.
 *
 * Fix: the original leaked the ACPI-allocated reply buffer when the
 * package had an unexpected element count (the `break` path returned
 * without kfree), and carried a dead `break` after a return.  The
 * reply is now freed on every path through a single exit.
 */
static int
dsm_get_label(acpi_handle handle, int func,
	      struct acpi_buffer *output,
	      char *buf, enum acpi_attr_enum attribute)
{
	struct acpi_object_list input;
	union acpi_object params[4];
	union acpi_object *obj;
	int len = -1;
	int err;

	input.count = 4;
	input.pointer = params;
	params[0].type = ACPI_TYPE_BUFFER;
	params[0].buffer.length = sizeof(device_label_dsm_uuid);
	params[0].buffer.pointer = (char *)device_label_dsm_uuid;
	params[1].type = ACPI_TYPE_INTEGER;
	params[1].integer.value = 0x02;	/* _DSM revision id */
	params[2].type = ACPI_TYPE_INTEGER;
	params[2].integer.value = func;
	params[3].type = ACPI_TYPE_PACKAGE;
	params[3].package.count = 0;
	params[3].package.elements = NULL;

	err = acpi_evaluate_object(handle, "_DSM", &input, output);
	if (err)
		return -1;

	obj = (union acpi_object *)output->pointer;

	if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 2) {
		len = obj->package.elements[0].integer.value;
		if (buf) {
			if (attribute == ACPI_ATTR_INDEX_SHOW)
				scnprintf(buf, PAGE_SIZE, "%llu\n",
					  obj->package.elements[0].integer.value);
			else if (attribute == ACPI_ATTR_LABEL_SHOW)
				dsm_label_utf16s_to_utf8s(obj, buf);
			len = strlen(buf);
		}
	}

	/* always release the ACPI-allocated reply */
	kfree(output->pointer);
	return len;
}
/*
 * Probe whether the device answers the label _DSM with a usable
 * (positive-length) reply.
 */
static bool
device_has_dsm(struct device *dev)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_handle handle = DEVICE_ACPI_HANDLE(dev);

	if (!handle)
		return FALSE;

	return dsm_get_label(handle, DEVICE_LABEL_DSM, &output, NULL,
			     ACPI_ATTR_NONE) > 0 ? TRUE : FALSE;
}
/*
 * sysfs is_visible hook: expose the ACPI attributes (read-only) only
 * when the device implements the label _DSM.
 */
static umode_t
acpi_index_string_exist(struct kobject *kobj, struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	return device_has_dsm(dev) ? S_IRUGO : 0;
}
/* sysfs show: the _DSM device label string. */
static ssize_t
acpilabel_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
	int length;

	if (!handle)
		return -1;

	length = dsm_get_label(handle, DEVICE_LABEL_DSM,
			       &output, buf, ACPI_ATTR_LABEL_SHOW);
	return (length < 1) ? -1 : length;
}
/* sysfs show: the _DSM instance number. */
static ssize_t
acpiindex_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
	int length;

	if (!handle)
		return -1;

	length = dsm_get_label(handle, DEVICE_LABEL_DSM,
			       &output, buf, ACPI_ATTR_INDEX_SHOW);
	return (length < 0) ? -1 : length;
}
/* sysfs "label" attribute, backed by the _DSM device name. */
static struct device_attribute acpi_attr_label = {
	.attr = {.name = "label", .mode = 0444},
	.show = acpilabel_show,
};

/* sysfs "acpi_index" attribute, backed by the _DSM instance number. */
static struct device_attribute acpi_attr_index = {
	.attr = {.name = "acpi_index", .mode = 0444},
	.show = acpiindex_show,
};

static struct attribute *acpi_attributes[] = {
	&acpi_attr_label.attr,
	&acpi_attr_index.attr,
	NULL,
};

/* is_visible hides both attributes when the device has no _DSM. */
static struct attribute_group acpi_attr_group = {
	.attrs = acpi_attributes,
	.is_visible = acpi_index_string_exist,
};
/*
 * Create the label/acpi_index sysfs group under the PCI device.
 * Returns the sysfs_create_group() result (0 on success).
 */
static int
pci_create_acpi_index_label_files(struct pci_dev *pdev)
{
	return sysfs_create_group(&pdev->dev.kobj, &acpi_attr_group);
}
/*
 * Remove the label/acpi_index sysfs group.  Always returns 0;
 * sysfs_remove_group() has no failure return.
 */
static int
pci_remove_acpi_index_label_files(struct pci_dev *pdev)
{
	sysfs_remove_group(&pdev->dev.kobj, &acpi_attr_group);
	return 0;
}
#endif
/*
 * Create the firmware-label sysfs files for @pdev: prefer the ACPI
 * _DSM label/index pair when the firmware provides it, otherwise fall
 * back to the SMBIOS-derived name file.
 */
void pci_create_firmware_label_files(struct pci_dev *pdev)
{
	if (device_has_dsm(&pdev->dev))
		pci_create_acpi_index_label_files(pdev);
	else
		pci_create_smbiosname_file(pdev);
}
/*
 * Tear down whichever firmware-label sysfs files were created for
 * @pdev; mirrors the selection logic of the create path.
 */
void pci_remove_firmware_label_files(struct pci_dev *pdev)
{
	if (device_has_dsm(&pdev->dev))
		pci_remove_acpi_index_label_files(pdev);
	else
		pci_remove_smbiosname_file(pdev);
}
| gpl-2.0 |
alexpotter1/Neutron_msm8974_d802 | drivers/staging/comedi/drivers/acl7225b.c | 8283 | 4285 | /*
* comedi/drivers/acl7225b.c
* Driver for Adlink NuDAQ ACL-7225b and clones
* José Luis Sánchez
*/
/*
Driver: acl7225b
Description: Adlink NuDAQ ACL-7225b & compatibles
Author: José Luis Sánchez (jsanchezv@teleline.es)
Status: testing
Devices: [Adlink] ACL-7225b (acl7225b), [ICP] P16R16DIO (p16r16dio)
*/
#include "../comedidev.h"
#include <linux/ioport.h>
#define ACL7225_SIZE 8 /* Requires 8 ioports, but only 4 are used */
#define P16R16DIO_SIZE 4
#define ACL7225_RIO_LO 0 /* Relays input/output low byte (R0-R7) */
#define ACL7225_RIO_HI 1 /* Relays input/output high byte (R8-R15) */
#define ACL7225_DI_LO 2 /* Digital input low byte (DI0-DI7) */
#define ACL7225_DI_HI 3 /* Digital input high byte (DI8-DI15) */
static int acl7225b_attach(struct comedi_device *dev,
struct comedi_devconfig *it);
static int acl7225b_detach(struct comedi_device *dev);
/* Per-board constants, selected via the comedi board name. */
struct boardtype {
	const char *name;	/* driver name */
	int io_range;		/* len of I/O space */
};

/* Supported boards; order must match the comedi board-name table. */
static const struct boardtype boardtypes[] = {
	{"acl7225b", ACL7225_SIZE,},
	{"p16r16dio", P16R16DIO_SIZE,},
};

/* Number of entries in boardtypes[]. */
#define n_boardtypes (sizeof(boardtypes)/sizeof(struct boardtype))
/* Board entry the comedi core selected for this device. */
#define this_board ((const struct boardtype *)dev->board_ptr)
/*
 * Comedi driver registration record: the board_name/num_names/offset
 * triple lets the core match a config string against boardtypes[].
 */
static struct comedi_driver driver_acl7225b = {
	.driver_name = "acl7225b",
	.module = THIS_MODULE,
	.attach = acl7225b_attach,
	.detach = acl7225b_detach,
	.board_name = &boardtypes[0].name,
	.num_names = n_boardtypes,
	.offset = sizeof(struct boardtype),
};
/* Register the driver with the comedi core on module load. */
static int __init driver_acl7225b_init_module(void)
{
	return comedi_driver_register(&driver_acl7225b);
}
/* Unregister the driver on module unload. */
static void __exit driver_acl7225b_cleanup_module(void)
{
	comedi_driver_unregister(&driver_acl7225b);
}
module_init(driver_acl7225b_init_module);
module_exit(driver_acl7225b_cleanup_module);
/*
 * Digital-output insn_bits handler: data[0] is the mask of channels to
 * update, data[1] the new bit values.  Only the 8-bit port halves that
 * the mask touches are written out.  Returns 2 (samples consumed) or
 * -EINVAL for a malformed instruction.
 */
static int acl7225b_do_insn(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data)
{
	unsigned long port = dev->iobase + (unsigned long)s->private;

	if (insn->n != 2)
		return -EINVAL;

	if (data[0]) {
		s->state &= ~data[0];
		s->state |= (data[0] & data[1]);
	}
	if (data[0] & 0x00ff)
		outb(s->state & 0xff, port);
	if (data[0] & 0xff00)
		outb(s->state >> 8, port + 1);

	data[1] = s->state;
	return 2;
}
/*
 * Digital-input insn_bits handler: read both 8-bit input ports and
 * combine them into one 16-bit sample in data[1].  Returns 2 or
 * -EINVAL for a malformed instruction.
 */
static int acl7225b_di_insn(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data)
{
	unsigned long port = dev->iobase + (unsigned long)s->private;

	if (insn->n != 2)
		return -EINVAL;

	data[1] = inb(port) | (inb(port + 1) << 8);
	return 2;
}
/*
 * Attach handler: claim the board's I/O region and set up three
 * 16-channel subdevices (relay outputs, relay read-back, isolated
 * digital inputs).  Returns 0 on success or a negative errno.
 *
 * Fix: the original leaked the requested I/O region when
 * alloc_subdevices() failed — it returned -ENOMEM without calling
 * release_region(), leaving the ports unusable until reboot.
 */
static int acl7225b_attach(struct comedi_device *dev,
			   struct comedi_devconfig *it)
{
	struct comedi_subdevice *s;
	int iobase, iorange;

	iobase = it->options[0];	/* base I/O port from config options */
	iorange = this_board->io_range;
	printk(KERN_INFO "comedi%d: acl7225b: board=%s 0x%04x\n", dev->minor,
	       this_board->name, iobase);
	if (!request_region(iobase, iorange, "acl7225b")) {
		printk(KERN_ERR "comedi%d: request_region failed - I/O port conflict\n",
		       dev->minor);
		return -EIO;
	}
	dev->board_name = this_board->name;
	dev->iobase = iobase;
	dev->irq = 0;

	if (alloc_subdevices(dev, 3) < 0) {
		/* Don't leak the I/O region on allocation failure. */
		release_region(iobase, iorange);
		return -ENOMEM;
	}

	s = dev->subdevices + 0;
	/* Relays outputs */
	s->type = COMEDI_SUBD_DO;
	s->subdev_flags = SDF_WRITABLE;
	s->maxdata = 1;
	s->n_chan = 16;
	s->insn_bits = acl7225b_do_insn;
	s->range_table = &range_digital;
	s->private = (void *)ACL7225_RIO_LO;

	s = dev->subdevices + 1;
	/* Relays status (read-back of the output latch ports) */
	s->type = COMEDI_SUBD_DI;
	s->subdev_flags = SDF_READABLE;
	s->maxdata = 1;
	s->n_chan = 16;
	s->insn_bits = acl7225b_di_insn;
	s->range_table = &range_digital;
	s->private = (void *)ACL7225_RIO_LO;

	s = dev->subdevices + 2;
	/* Isolated digital inputs */
	s->type = COMEDI_SUBD_DI;
	s->subdev_flags = SDF_READABLE;
	s->maxdata = 1;
	s->n_chan = 16;
	s->insn_bits = acl7225b_di_insn;
	s->range_table = &range_digital;
	s->private = (void *)ACL7225_DI_LO;

	return 0;
}
/*
 * Detach handler: release the I/O region claimed in attach.
 * dev->iobase is only non-zero after a successful request_region(),
 * so the guard prevents releasing a region we never owned.
 */
static int acl7225b_detach(struct comedi_device *dev)
{
	printk(KERN_INFO "comedi%d: acl7225b: remove\n", dev->minor);

	if (dev->iobase)
		release_region(dev->iobase, this_board->io_range);

	return 0;
}
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
goldelico/letux-400 | fs/xfs/xfs_trans_buf.c | 92 | 31705 | /*
* Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_rw.h"
STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *,
xfs_daddr_t, int);
STATIC xfs_buf_t *xfs_trans_buf_item_match_all(xfs_trans_t *, xfs_buftarg_t *,
xfs_daddr_t, int);
/*
* Get and lock the buffer for the caller if it is not already
* locked within the given transaction. If it is already locked
* within the transaction, just increment its lock recursion count
* and return a pointer to it.
*
* Use the fast path function xfs_trans_buf_item_match() or the buffer
* cache routine incore_match() to find the buffer
* if it is already owned by this transaction.
*
* If we don't already own the buffer, use get_buf() to get it.
* If it doesn't yet have an associated xfs_buf_log_item structure,
* then allocate one and add the item to this transaction.
*
* If the transaction pointer is NULL, make this just a normal
* get_buf() call.
*/
xfs_buf_t *
xfs_trans_get_buf(xfs_trans_t	*tp,
		  xfs_buftarg_t	*target_dev,
		  xfs_daddr_t	blkno,
		  int		len,
		  uint		flags)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;

	/* No flags given: default to a locked, mapped buffer. */
	if (flags == 0)
		flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;

	/*
	 * Default to a normal get_buf() call if the tp is NULL.
	 */
	if (tp == NULL) {
		bp = xfs_buf_get_flags(target_dev, blkno, len,
				       flags | BUF_BUSY);
		return(bp);
	}

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 *
	 * The fast path scans only the transaction's embedded log item
	 * chunk; _match_all() walks every chunk when more exist.
	 */
	if (tp->t_items.lic_next == NULL) {
		bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len);
	} else {
		bp = xfs_trans_buf_item_match_all(tp, target_dev, blkno, len);
	}
	if (bp != NULL) {
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
		if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
			xfs_buftrace("TRANS GET RECUR SHUT", bp);
			XFS_BUF_SUPER_STALE(bp);
		}
		/*
		 * If the buffer is stale then it was binval'ed
		 * since last read.  This doesn't matter since the
		 * caller isn't allowed to use the data anyway.
		 */
		else if (XFS_BUF_ISSTALE(bp)) {
			xfs_buftrace("TRANS GET RECUR STALE", bp);
			ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
		}
		ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
		bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		xfs_buftrace("TRANS GET RECUR", bp);
		xfs_buf_item_trace("GET RECUR", bip);
		return (bp);
	}

	/*
	 * We always specify the BUF_BUSY flag within a transaction so
	 * that get_buf does not try to push out a delayed write buffer
	 * which might cause another transaction to take place (if the
	 * buffer was delayed alloc).  Such recursive transactions can
	 * easily deadlock with our current transaction as well as cause
	 * us to run out of stack space.
	 */
	bp = xfs_buf_get_flags(target_dev, blkno, len, flags | BUF_BUSY);
	if (bp == NULL) {
		return NULL;
	}

	ASSERT(!XFS_BUF_GETERROR(bp));

	/*
	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);

	/*
	 * Set the recursion count for the buffer within this transaction
	 * to 0.
	 */
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip);

	/*
	 * Initialize b_fsprivate2 so we can find it with incore_match()
	 * above.
	 */
	XFS_BUF_SET_FSPRIVATE2(bp, tp);
	xfs_buftrace("TRANS GET", bp);
	xfs_buf_item_trace("GET", bip);
	return (bp);
}
/*
* Get and lock the superblock buffer of this file system for the
* given transaction.
*
* We don't need to use incore_match() here, because the superblock
* buffer is a private buffer which we keep a pointer to in the
* mount structure.
*/
xfs_buf_t *
xfs_trans_getsb(xfs_trans_t	*tp,
		struct xfs_mount *mp,
		int		flags)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;

	/*
	 * Default to just trying to lock the superblock buffer
	 * if tp is NULL.
	 */
	if (tp == NULL) {
		return (xfs_getsb(mp, flags));
	}

	/*
	 * If the superblock buffer already has this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 *
	 * No cache lookup is needed: the superblock buffer is cached
	 * directly in the mount structure (mp->m_sb_bp).
	 */
	bp = mp->m_sb_bp;
	if (XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp) {
		bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		xfs_buf_item_trace("GETSB RECUR", bip);
		return (bp);
	}

	bp = xfs_getsb(mp, flags);
	if (bp == NULL) {
		return NULL;
	}

	/*
	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, mp);

	/*
	 * Set the recursion count for the buffer within this transaction
	 * to 0.
	 */
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip);

	/*
	 * Initialize b_fsprivate2 so we can find it with incore_match()
	 * above.
	 */
	XFS_BUF_SET_FSPRIVATE2(bp, tp);
	xfs_buf_item_trace("GETSB", bip);
	return (bp);
}
#ifdef DEBUG
xfs_buftarg_t *xfs_error_target;
int xfs_do_error;
int xfs_req_num;
int xfs_error_mod = 33;
#endif
/*
* Get and lock the buffer for the caller if it is not already
* locked within the given transaction. If it has not yet been
* read in, read it from disk. If it is already locked
* within the transaction and already read in, just increment its
* lock recursion count and return a pointer to it.
*
* Use the fast path function xfs_trans_buf_item_match() or the buffer
* cache routine incore_match() to find the buffer
* if it is already owned by this transaction.
*
* If we don't already own the buffer, use read_buf() to get it.
* If it doesn't yet have an associated xfs_buf_log_item structure,
* then allocate one and add the item to this transaction.
*
* If the transaction pointer is NULL, make this just a normal
* read_buf() call.
*/
int
xfs_trans_read_buf(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_buftarg_t	*target,
	xfs_daddr_t	blkno,
	int		len,
	uint		flags,
	xfs_buf_t	**bpp)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;
	int			error;

	/* No flags given: default to a locked, mapped buffer. */
	if (flags == 0)
		flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;

	/*
	 * Default to a normal get_buf() call if the tp is NULL.
	 */
	if (tp == NULL) {
		bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY);
		if (!bp)
			return XFS_ERROR(ENOMEM);

		if ((bp != NULL) && (XFS_BUF_GETERROR(bp) != 0)) {
			xfs_ioerror_alert("xfs_trans_read_buf", mp,
					  bp, blkno);
			error = XFS_BUF_GETERROR(bp);
			xfs_buf_relse(bp);
			return error;
		}
#ifdef DEBUG
		/*
		 * Error injection: fail every xfs_error_mod'th read on
		 * the target selected via xfs_error_target.
		 */
		if (xfs_do_error && (bp != NULL)) {
			if (xfs_error_target == target) {
				if (((xfs_req_num++) % xfs_error_mod) == 0) {
					xfs_buf_relse(bp);
					cmn_err(CE_DEBUG, "Returning error!\n");
					return XFS_ERROR(EIO);
				}
			}
		}
#endif
		if (XFS_FORCED_SHUTDOWN(mp))
			goto shutdown_abort;
		*bpp = bp;
		return 0;
	}

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  If it is already read in we just increment
	 * the lock recursion count and return the buffer to the caller.
	 * If the buffer is not yet read in, then we read it in, increment
	 * the lock recursion count, and return it to the caller.
	 */
	if (tp->t_items.lic_next == NULL) {
		bp = xfs_trans_buf_item_match(tp, target, blkno, len);
	} else {
		bp = xfs_trans_buf_item_match_all(tp, target, blkno, len);
	}
	if (bp != NULL) {
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
		ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
		ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
		ASSERT((XFS_BUF_ISERROR(bp)) == 0);
		if (!(XFS_BUF_ISDONE(bp))) {
			xfs_buftrace("READ_BUF_INCORE !DONE", bp);
			ASSERT(!XFS_BUF_ISASYNC(bp));
			XFS_BUF_READ(bp);
			xfsbdstrat(tp->t_mountp, bp);
			xfs_iowait(bp);
			if (XFS_BUF_GETERROR(bp) != 0) {
				xfs_ioerror_alert("xfs_trans_read_buf", mp,
						  bp, blkno);
				error = XFS_BUF_GETERROR(bp);
				xfs_buf_relse(bp);
				/*
				 * We can gracefully recover from most
				 * read errors. Ones we can't are those
				 * that happen after the transaction's
				 * already dirty.
				 */
				if (tp->t_flags & XFS_TRANS_DIRTY)
					xfs_force_shutdown(tp->t_mountp,
							SHUTDOWN_META_IO_ERROR);
				return error;
			}
		}
		/*
		 * We never locked this buf ourselves, so we shouldn't
		 * brelse it either. Just get out.
		 */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			xfs_buftrace("READ_BUF_INCORE XFSSHUTDN", bp);
			*bpp = NULL;
			return XFS_ERROR(EIO);
		}

		bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
		bip->bli_recur++;

		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		xfs_buf_item_trace("READ RECUR", bip);
		*bpp = bp;
		return 0;
	}

	/*
	 * We always specify the BUF_BUSY flag within a transaction so
	 * that get_buf does not try to push out a delayed write buffer
	 * which might cause another transaction to take place (if the
	 * buffer was delayed alloc).  Such recursive transactions can
	 * easily deadlock with our current transaction as well as cause
	 * us to run out of stack space.
	 */
	bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY);
	if (bp == NULL) {
		*bpp = NULL;
		return 0;
	}
	if (XFS_BUF_GETERROR(bp) != 0) {
		XFS_BUF_SUPER_STALE(bp);
		xfs_buftrace("READ ERROR", bp);
		error = XFS_BUF_GETERROR(bp);

		xfs_ioerror_alert("xfs_trans_read_buf", mp,
				  bp, blkno);
		/* Read errors on a dirty transaction are unrecoverable. */
		if (tp->t_flags & XFS_TRANS_DIRTY)
			xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
		xfs_buf_relse(bp);
		return error;
	}
#ifdef DEBUG
	if (xfs_do_error && !(tp->t_flags & XFS_TRANS_DIRTY)) {
		if (xfs_error_target == target) {
			if (((xfs_req_num++) % xfs_error_mod) == 0) {
				xfs_force_shutdown(tp->t_mountp,
						   SHUTDOWN_META_IO_ERROR);
				xfs_buf_relse(bp);
				cmn_err(CE_DEBUG, "Returning trans error!\n");
				return XFS_ERROR(EIO);
			}
		}
	}
#endif
	if (XFS_FORCED_SHUTDOWN(mp))
		goto shutdown_abort;

	/*
	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);

	/*
	 * Set the recursion count for the buffer within this transaction
	 * to 0.
	 */
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip);

	/*
	 * Initialize b_fsprivate2 so we can find it with incore_match()
	 * above.
	 */
	XFS_BUF_SET_FSPRIVATE2(bp, tp);
	xfs_buftrace("TRANS READ", bp);
	xfs_buf_item_trace("READ", bip);
	*bpp = bp;
	return 0;

shutdown_abort:
	/*
	 * the theory here is that buffer is good but we're
	 * bailing out because the filesystem is being forcibly
	 * shut down.  So we should leave the b_flags alone since
	 * the buffer's not staled and just get out.
	 */
#if defined(DEBUG)
	if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
		cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp);
#endif
	ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
				     (XFS_B_STALE|XFS_B_DELWRI));

	xfs_buftrace("READ_BUF XFSSHUTDN", bp);
	xfs_buf_relse(bp);
	*bpp = NULL;
	return XFS_ERROR(EIO);
}
/*
* Release the buffer bp which was previously acquired with one of the
* xfs_trans_... buffer allocation routines if the buffer has not
* been modified within this transaction. If the buffer is modified
* within this transaction, do decrement the recursion count but do
* not release the buffer even if the count goes to 0. If the buffer is not
* modified within the transaction, decrement the recursion count and
* release the buffer if the recursion count goes to 0.
*
* If the buffer is to be released and it was not modified before
* this transaction began, then free the buf_log_item associated with it.
*
* If the transaction pointer is NULL, make this just a normal
* brelse() call.
*/
void
xfs_trans_brelse(xfs_trans_t	*tp,
		 xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;
	xfs_log_item_t		*lip;
	xfs_log_item_desc_t	*lidp;

	/*
	 * Default to a normal brelse() call if the tp is NULL.
	 */
	if (tp == NULL) {
		ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);
		/*
		 * If there's a buf log item attached to the buffer,
		 * then let the AIL know that the buffer is being
		 * unlocked.
		 */
		if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
			lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
			if (lip->li_type == XFS_LI_BUF) {
				bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*);
				xfs_trans_unlocked_item(
						bip->bli_item.li_mountp,
						lip);
			}
		}
		xfs_buf_relse(bp);
		return;
	}

	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	/*
	 * Find the item descriptor pointing to this buffer's
	 * log item.  It must be there.
	 */
	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
	ASSERT(lidp != NULL);

	/*
	 * If the release is just for a recursive lock,
	 * then decrement the count and return.
	 */
	if (bip->bli_recur > 0) {
		bip->bli_recur--;
		xfs_buf_item_trace("RELSE RECUR", bip);
		return;
	}

	/*
	 * If the buffer is dirty within this transaction, we can't
	 * release it until we commit.
	 */
	if (lidp->lid_flags & XFS_LID_DIRTY) {
		xfs_buf_item_trace("RELSE DIRTY", bip);
		return;
	}

	/*
	 * If the buffer has been invalidated, then we can't release
	 * it until the transaction commits to disk unless it is re-dirtied
	 * as part of this transaction.  This prevents us from pulling
	 * the item from the AIL before we should.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		xfs_buf_item_trace("RELSE STALE", bip);
		return;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	xfs_buf_item_trace("RELSE", bip);

	/*
	 * Free up the log item descriptor tracking the released item.
	 */
	xfs_trans_free_item(tp, lidp);

	/*
	 * Clear the hold flag in the buf log item if it is set.
	 * We wouldn't want the next user of the buffer to
	 * get confused.
	 */
	if (bip->bli_flags & XFS_BLI_HOLD) {
		bip->bli_flags &= ~XFS_BLI_HOLD;
	}

	/*
	 * Drop our reference to the buf log item.
	 */
	atomic_dec(&bip->bli_refcount);

	/*
	 * If the buf item is not tracking data in the log, then
	 * we must free it before releasing the buffer back to the
	 * free pool.  Before releasing the buffer to the free pool,
	 * clear the transaction pointer in b_fsprivate2 to dissolve
	 * its relation to this transaction.
	 */
	if (!xfs_buf_item_dirty(bip)) {
/***
		ASSERT(bp->b_pincount == 0);
***/
		ASSERT(atomic_read(&bip->bli_refcount) == 0);
		ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
		ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
		xfs_buf_item_relse(bp);
		bip = NULL;	/* relse'd above; don't notify the AIL below */
	}
	XFS_BUF_SET_FSPRIVATE2(bp, NULL);

	/*
	 * If we've still got a buf log item on the buffer, then
	 * tell the AIL that the buffer is being unlocked.
	 */
	if (bip != NULL) {
		xfs_trans_unlocked_item(bip->bli_item.li_mountp,
					(xfs_log_item_t*)bip);
	}

	xfs_buf_relse(bp);
	return;
}
/*
* Add the locked buffer to the transaction.
* The buffer must be locked, and it cannot be associated with any
* transaction.
*
* If the buffer does not yet have a buf log item associated with it,
* then allocate one for it. Then add the buf item to the transaction.
*/
void
xfs_trans_bjoin(xfs_trans_t	*tp,
		xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	/* Caller must hold the buffer and it must not be in a transaction. */
	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);

	/*
	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	(void) xfs_trans_add_item(tp, (xfs_log_item_t *)bip);

	/*
	 * Initialize b_fsprivate2 so we can find it with incore_match()
	 * in xfs_trans_get_buf() and friends above.
	 */
	XFS_BUF_SET_FSPRIVATE2(bp, tp);

	xfs_buf_item_trace("BJOIN", bip);
}
/*
* Mark the buffer as not needing to be unlocked when the buf item's
* IOP_UNLOCK() routine is called. The buffer must already be locked
* and associated with the given transaction.
*/
/* ARGSUSED */
/* ARGSUSED */
void
xfs_trans_bhold(xfs_trans_t	*tp,
		xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	/* Buffer must be locked and owned by this transaction. */
	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	/* Keep the buffer locked across IOP_UNLOCK at commit. */
	bip->bli_flags |= XFS_BLI_HOLD;
	xfs_buf_item_trace("BHOLD", bip);
}
/*
* Cancel the previous buffer hold request made on this buffer
* for this transaction.
*/
void
xfs_trans_bhold_release(xfs_trans_t	*tp,
			xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	/* Buffer must be locked, owned by this transaction, and held. */
	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT(bip->bli_flags & XFS_BLI_HOLD);
	bip->bli_flags &= ~XFS_BLI_HOLD;
	xfs_buf_item_trace("BHOLD RELEASE", bip);
}
/*
* This is called to mark bytes first through last inclusive of the given
* buffer as needing to be logged when the transaction is committed.
* The buffer must already be associated with the given transaction.
*
* First and last are numbers relative to the beginning of this buffer,
* so the first byte in the buffer is numbered 0 regardless of the
* value of b_blkno.
*/
void
xfs_trans_log_buf(xfs_trans_t	*tp,
		  xfs_buf_t	*bp,
		  uint		first,
		  uint		last)
{
	xfs_buf_log_item_t	*bip;
	xfs_log_item_desc_t	*lidp;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
	/* Byte range must lie within the buffer. */
	ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp)));
	ASSERT((XFS_BUF_IODONE_FUNC(bp) == NULL) ||
	       (XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks));

	/*
	 * Mark the buffer as needing to be written out eventually,
	 * and set its iodone function to remove the buffer's buf log
	 * item from the AIL and free it when the buffer is flushed
	 * to disk.  See xfs_buf_attach_iodone() for more details
	 * on li_cb and xfs_buf_iodone_callbacks().
	 * If we end up aborting this transaction, we trap this buffer
	 * inside the b_bdstrat callback so that this won't get written to
	 * disk.
	 */
	XFS_BUF_DELAYWRITE(bp);
	XFS_BUF_DONE(bp);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
	bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))xfs_buf_iodone;

	/*
	 * If we invalidated the buffer within this transaction, then
	 * cancel the invalidation now that we're dirtying the buffer
	 * again.  There are no races with the code in xfs_buf_item_unpin(),
	 * because we have a reference to the buffer this entire time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		xfs_buf_item_trace("BLOG UNSTALE", bip);
		bip->bli_flags &= ~XFS_BLI_STALE;
		ASSERT(XFS_BUF_ISSTALE(bp));
		XFS_BUF_UNSTALE(bp);
		bip->bli_format.blf_flags &= ~XFS_BLI_CANCEL;
	}

	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
	ASSERT(lidp != NULL);

	/* Mark both the descriptor and the transaction dirty. */
	tp->t_flags |= XFS_TRANS_DIRTY;
	lidp->lid_flags |= XFS_LID_DIRTY;
	lidp->lid_flags &= ~XFS_LID_BUF_STALE;
	bip->bli_flags |= XFS_BLI_LOGGED;
	xfs_buf_item_log(bip, first, last);
	xfs_buf_item_trace("BLOG", bip);
}
/*
* This called to invalidate a buffer that is being used within
* a transaction. Typically this is because the blocks in the
* buffer are being freed, so we need to prevent it from being
* written out when we're done. Allowing it to be written again
* might overwrite data in the free blocks if they are reallocated
* to a file.
*
* We prevent the buffer from being written out by clearing the
* B_DELWRI flag. We can't always
* get rid of the buf log item at this point, though, because
* the buffer may still be pinned by another transaction. If that
* is the case, then we'll wait until the buffer is committed to
* disk for the last time (we can tell by the ref count) and
* free it in xfs_buf_item_unpin(). Until it is cleaned up we
* will keep the buffer locked so that the buffer and buf log item
* are not reused.
*/
void
xfs_trans_binval(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_log_item_desc_t	*lidp;
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
	ASSERT(lidp != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * If the buffer is already invalidated, then
		 * just return.
		 */
		ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
		ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_INODE_BUF));
		ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
		ASSERT(lidp->lid_flags & XFS_LID_DIRTY);
		ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
		xfs_buftrace("XFS_BINVAL RECUR", bp);
		xfs_buf_item_trace("BINVAL RECUR", bip);
		return;
	}

	/*
	 * Clear the dirty bit in the buffer and set the STALE flag
	 * in the buf log item.  The STALE flag will be used in
	 * xfs_buf_item_unpin() to determine if it should clean up
	 * when the last reference to the buf item is given up.
	 * We set the XFS_BLI_CANCEL flag in the buf log format structure
	 * and log the buf item.  This will be used at recovery time
	 * to determine that copies of the buffer in the log before
	 * this should not be replayed.
	 * We mark the item descriptor and the transaction dirty so
	 * that we'll hold the buffer until after the commit.
	 *
	 * Since we're invalidating the buffer, we also clear the state
	 * about which parts of the buffer have been logged.  We also
	 * clear the flag indicating that this is an inode buffer since
	 * the data in the buffer will no longer be valid.
	 *
	 * We set the stale bit in the buffer as well since we're getting
	 * rid of it.
	 */
	XFS_BUF_UNDELAYWRITE(bp);
	XFS_BUF_STALE(bp);
	bip->bli_flags |= XFS_BLI_STALE;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_DIRTY);
	bip->bli_format.blf_flags &= ~XFS_BLI_INODE_BUF;
	bip->bli_format.blf_flags |= XFS_BLI_CANCEL;
	/* Forget which byte ranges were logged; nothing will be replayed. */
	memset((char *)(bip->bli_format.blf_data_map), 0,
	      (bip->bli_format.blf_map_size * sizeof(uint)));
	lidp->lid_flags |= XFS_LID_DIRTY|XFS_LID_BUF_STALE;
	tp->t_flags |= XFS_TRANS_DIRTY;
	xfs_buftrace("XFS_BINVAL", bp);
	xfs_buf_item_trace("BINVAL", bip);
}
/*
* This call is used to indicate that the buffer contains on-disk
* inodes which must be handled specially during recovery. They
* require special handling because only the di_next_unlinked from
* the inodes in the buffer should be recovered. The rest of the
* data in the buffer is logged via the inodes themselves.
*
* All we do is set the XFS_BLI_INODE_BUF flag in the buffer's log
* format structure so that we'll know what to do at recovery time.
*/
/* ARGSUSED */
/* ARGSUSED */
void
xfs_trans_inode_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	/* Flag the buffer as containing on-disk inodes for log recovery. */
	bip->bli_format.blf_flags |= XFS_BLI_INODE_BUF;
}
/*
* This call is used to indicate that the buffer is going to
* be staled and was an inode buffer. This means it gets
* special processing during unpin - where any inodes
* associated with the buffer should be removed from ail.
* There is also special processing during recovery,
* any replay of the inodes in the buffer needs to be
* prevented as the buffer may have been reused.
*/
void
xfs_trans_stale_inode_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	/* Mark as a staled inode buffer and route iodone to xfs_buf_iodone. */
	bip->bli_flags |= XFS_BLI_STALE_INODE;
	bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))
		xfs_buf_iodone;
}
/*
* Mark the buffer as being one which contains newly allocated
* inodes. We need to make sure that even if this buffer is
* relogged as an 'inode buf' we still recover all of the inode
* images in the face of a crash. This works in coordination with
* xfs_buf_item_committed() to ensure that the buffer remains in the
* AIL at its original location even after it has been relogged.
*/
/* ARGSUSED */
/* ARGSUSED */
void
xfs_trans_inode_alloc_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	/* Mark the buffer as holding freshly-allocated inodes. */
	bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
}
/*
* Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
* dquots. However, unlike in inode buffer recovery, dquot buffers get
* recovered in their entirety. (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
* The only thing that makes dquot buffers different from regular
* buffers is that we must not replay dquot bufs when recovering
* if a _corresponding_ quotaoff has happened. We also have to distinguish
* between usr dquot bufs and grp dquot bufs, because usr and grp quotas
* can be turned off independently.
*/
/* ARGSUSED */
/* ARGSUSED */
void
xfs_trans_dquot_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp,
	uint		type)
{
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
	/* Exactly one dquot-type flag (user, project or group) is allowed. */
	ASSERT(type == XFS_BLI_UDQUOT_BUF ||
	       type == XFS_BLI_PDQUOT_BUF ||
	       type == XFS_BLI_GDQUOT_BUF);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_format.blf_flags |= type;
}
/*
* Check to see if a buffer matching the given parameters is already
* a part of the given transaction. Only check the first, embedded
* chunk, since we don't want to spend all day scanning large transactions.
*/
STATIC xfs_buf_t *
xfs_trans_buf_item_match(
	xfs_trans_t	*tp,
	xfs_buftarg_t	*target,
	xfs_daddr_t	blkno,
	int		len)
{
	xfs_log_item_chunk_t	*licp;
	xfs_log_item_desc_t	*lidp;
	xfs_buf_log_item_t	*blip;
	xfs_buf_t		*match;
	int			i;

	/* Only the first, embedded chunk is scanned (see comment above). */
	licp = &tp->t_items;
	if (XFS_LIC_ARE_ALL_FREE(licp))
		return NULL;

	len = BBTOB(len);
	for (i = 0; i < licp->lic_unused; i++) {
		/* Skip unoccupied slots. */
		if (XFS_LIC_ISFREE(licp, i))
			continue;

		lidp = XFS_LIC_SLOT(licp, i);
		blip = (xfs_buf_log_item_t *)lidp->lid_item;
		if (blip->bli_item.li_type != XFS_LI_BUF)
			continue;

		/* Same target, block number and byte count => found it. */
		match = blip->bli_buf;
		if ((XFS_BUF_TARGET(match) == target) &&
		    (XFS_BUF_ADDR(match) == blkno) &&
		    (XFS_BUF_COUNT(match) == len))
			return match;
	}

	return NULL;
}
/*
* Check to see if a buffer matching the given parameters is already
* a part of the given transaction. Check all the chunks, we
* want to be thorough.
*/
STATIC xfs_buf_t *
xfs_trans_buf_item_match_all(
	xfs_trans_t	*tp,
	xfs_buftarg_t	*target,
	xfs_daddr_t	blkno,
	int		len)
{
	xfs_log_item_chunk_t	*licp;
	xfs_log_item_desc_t	*lidp;
	xfs_buf_log_item_t	*blip;
	xfs_buf_t		*match;
	int			i;

	len = BBTOB(len);

	/* Walk every chunk in the transaction's item list. */
	for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) {
		if (XFS_LIC_ARE_ALL_FREE(licp)) {
			/* An all-free chunk can only be the embedded one. */
			ASSERT(licp == &tp->t_items);
			ASSERT(licp->lic_next == NULL);
			return NULL;
		}

		for (i = 0; i < licp->lic_unused; i++) {
			/* Skip unoccupied slots. */
			if (XFS_LIC_ISFREE(licp, i))
				continue;

			lidp = XFS_LIC_SLOT(licp, i);
			blip = (xfs_buf_log_item_t *)lidp->lid_item;
			if (blip->bli_item.li_type != XFS_LI_BUF)
				continue;

			/* Same target, block and length => found it. */
			match = blip->bli_buf;
			if ((XFS_BUF_TARGET(match) == target) &&
			    (XFS_BUF_ADDR(match) == blkno) &&
			    (XFS_BUF_COUNT(match) == len))
				return match;
		}
	}

	return NULL;
}
| gpl-2.0 |
hitomi2500/wasca | fpga_firmware/software/wasca_test9_bsp/drivers/src/altera_avalon_jtag_uart_fd.c | 92 | 4010 | /******************************************************************************
* *
* License Agreement *
* *
* Copyright (c) 2007 Altera Corporation, San Jose, California, USA. *
* All rights reserved. *
* *
* Permission is hereby granted, free of charge, to any person obtaining a *
* copy of this software and associated documentation files (the "Software"), *
* to deal in the Software without restriction, including without limitation *
* the rights to use, copy, modify, merge, publish, distribute, sublicense, *
* and/or sell copies of the Software, and to permit persons to whom the *
* Software is furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in *
* all copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *
* DEALINGS IN THE SOFTWARE. *
* *
* This agreement shall be governed in all respects by the laws of the State *
* of California and by the laws of the United States of America. *
* *
******************************************************************************/
#include "alt_types.h"
#include "sys/alt_dev.h"
#include "altera_avalon_jtag_uart.h"
extern int altera_avalon_jtag_uart_read(altera_avalon_jtag_uart_state* sp,
char* buffer, int space, int flags);
extern int altera_avalon_jtag_uart_write(altera_avalon_jtag_uart_state* sp,
const char* ptr, int count, int flags);
extern int altera_avalon_jtag_uart_ioctl(altera_avalon_jtag_uart_state* sp,
int req, void* arg);
extern int altera_avalon_jtag_uart_close(altera_avalon_jtag_uart_state* sp,
int flags);
/* ----------------------------------------------------------------------- */
/* --------------------- WRAPPERS FOR ALT FD SUPPORT --------------------- */
/*
*
*/
/*
 * File-descriptor wrapper: forward a read request to the JTAG UART
 * driver, passing along the descriptor's open flags.
 */
int
altera_avalon_jtag_uart_read_fd(alt_fd* fd, char* buffer, int space)
{
  altera_avalon_jtag_uart_state* sp =
      &((altera_avalon_jtag_uart_dev*) fd->dev)->state;

  return altera_avalon_jtag_uart_read(sp, buffer, space, fd->fd_flags);
}
/*
 * File-descriptor wrapper: forward a write request to the JTAG UART
 * driver, passing along the descriptor's open flags.
 */
int
altera_avalon_jtag_uart_write_fd(alt_fd* fd, const char* buffer, int space)
{
  altera_avalon_jtag_uart_state* sp =
      &((altera_avalon_jtag_uart_dev*) fd->dev)->state;

  return altera_avalon_jtag_uart_write(sp, buffer, space, fd->fd_flags);
}
#ifndef ALTERA_AVALON_JTAG_UART_SMALL
/*
 * File-descriptor wrapper: forward a close request to the JTAG UART
 * driver, passing along the descriptor's open flags.
 */
int
altera_avalon_jtag_uart_close_fd(alt_fd* fd)
{
  altera_avalon_jtag_uart_state* sp =
      &((altera_avalon_jtag_uart_dev*) fd->dev)->state;

  return altera_avalon_jtag_uart_close(sp, fd->fd_flags);
}
/*
 * File-descriptor wrapper: forward an ioctl request to the JTAG UART
 * driver.
 */
int
altera_avalon_jtag_uart_ioctl_fd(alt_fd* fd, int req, void* arg)
{
  altera_avalon_jtag_uart_state* sp =
      &((altera_avalon_jtag_uart_dev*) fd->dev)->state;

  return altera_avalon_jtag_uart_ioctl(sp, req, arg);
}
#endif /* ALTERA_AVALON_JTAG_UART_SMALL */
| gpl-2.0 |
bigzz/linux-linaro-lsk | drivers/iio/gyro/itg3200_buffer.c | 860 | 3716 | /*
* itg3200_buffer.c -- support InvenSense ITG3200
* Digital 3-Axis Gyroscope driver
*
* Copyright (c) 2011 Christian Strobel <christian.strobel@iis.fraunhofer.de>
* Copyright (c) 2011 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
* Copyright (c) 2012 Thorsten Nowak <thorsten.nowak@iis.fraunhofer.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/gyro/itg3200.h>
/*
 * Burst-read temperature plus all three gyro axes in one I2C transaction,
 * starting at ITG3200_REG_TEMP_OUT_H.  Results land in the caller's buf
 * (ITG3200_SCAN_ELEMENTS big-endian 16-bit words).
 *
 * Returns the i2c_transfer() result (< 0 on error).
 */
static int itg3200_read_all_channels(struct i2c_client *i2c, __be16 *buf)
{
	/* Register address with MSB set, as the driver has always sent;
	 * NOTE(review): presumably marks a multi-byte read — confirm
	 * against the ITG-3200 datasheet. */
	u8 tx = 0x80 | ITG3200_REG_TEMP_OUT_H;
	struct i2c_msg msg[2] = {
		{
			.addr = i2c->addr,
			.flags = i2c->flags,
			.len = 1,
			.buf = &tx,
		},
		{
			.addr = i2c->addr,
			.flags = i2c->flags | I2C_M_RD,
			.len = ITG3200_SCAN_ELEMENTS * sizeof(s16),
			/*
			 * Fix: read into the caller's buffer.  The old
			 * "(char *)&buf" pointed at the stack slot holding
			 * the pointer itself, so sample data overwrote the
			 * local pointer instead of filling buf.
			 */
			.buf = (char *)buf,
		},
	};

	return i2c_transfer(i2c->adapter, msg, 2);
}
static irqreturn_t itg3200_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct itg3200 *st = iio_priv(indio_dev);
__be16 buf[ITG3200_SCAN_ELEMENTS + sizeof(s64)/sizeof(u16)];
int ret = itg3200_read_all_channels(st->i2c, buf);
if (ret < 0)
goto error_ret;
if (indio_dev->scan_timestamp)
memcpy(buf + indio_dev->scan_bytes - sizeof(s64),
&pf->timestamp, sizeof(pf->timestamp));
iio_push_to_buffers(indio_dev, (u8 *)buf);
iio_trigger_notify_done(indio_dev->trig);
error_ret:
return IRQ_HANDLED;
}
/*
 * Set up the standard IIO triggered buffer: timestamps are captured in
 * the top half, itg3200_trigger_handler fills the scan in the bottom
 * half.
 */
int itg3200_buffer_configure(struct iio_dev *indio_dev)
{
	return iio_triggered_buffer_setup(indio_dev,
					  &iio_pollfunc_store_time,
					  itg3200_trigger_handler,
					  NULL);
}
/* Tear down what itg3200_buffer_configure() set up. */
void itg3200_buffer_unconfigure(struct iio_dev *indio_dev)
{
	iio_triggered_buffer_cleanup(indio_dev);
}
/*
 * Enable/disable the device's data-ready interrupt by read-modify-write
 * of ITG3200_REG_IRQ_CONFIG.
 *
 * Returns 0 on success or the register-access error code.
 */
static int itg3200_data_rdy_trigger_set_state(struct iio_trigger *trig,
					      bool state)
{
	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
	int ret;
	u8 msc;

	ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_IRQ_CONFIG, &msc);
	if (ret)
		return ret;

	if (state)
		msc |= ITG3200_IRQ_DATA_RDY_ENABLE;
	else
		msc &= ~ITG3200_IRQ_DATA_RDY_ENABLE;

	/*
	 * Fix: the original ended with "if (ret) goto error_ret;"
	 * followed immediately by the error_ret label — dead control
	 * flow.  Plain early returns say the same thing.
	 */
	return itg3200_write_reg_8(indio_dev, ITG3200_REG_IRQ_CONFIG, msc);
}
/* Trigger ops: only state toggling is needed; the data-ready IRQ itself
 * is wired to iio_trigger_generic_data_rdy_poll in itg3200_probe_trigger. */
static const struct iio_trigger_ops itg3200_trigger_ops = {
	.owner = THIS_MODULE,
	.set_trigger_state = &itg3200_data_rdy_trigger_set_state,
};
int itg3200_probe_trigger(struct iio_dev *indio_dev)
{
int ret;
struct itg3200 *st = iio_priv(indio_dev);
st->trig = iio_trigger_alloc("%s-dev%d", indio_dev->name,
indio_dev->id);
if (!st->trig)
return -ENOMEM;
ret = request_irq(st->i2c->irq,
&iio_trigger_generic_data_rdy_poll,
IRQF_TRIGGER_RISING,
"itg3200_data_rdy",
st->trig);
if (ret)
goto error_free_trig;
st->trig->dev.parent = &st->i2c->dev;
st->trig->ops = &itg3200_trigger_ops;
iio_trigger_set_drvdata(st->trig, indio_dev);
ret = iio_trigger_register(st->trig);
if (ret)
goto error_free_irq;
/* select default trigger */
indio_dev->trig = iio_trigger_get(st->trig);
return 0;
error_free_irq:
free_irq(st->i2c->irq, st->trig);
error_free_trig:
iio_trigger_free(st->trig);
return ret;
}
/*
 * Reverse of itg3200_probe_trigger(): unregister the trigger, release
 * the data-ready IRQ, then free the trigger object.
 */
void itg3200_remove_trigger(struct iio_dev *indio_dev)
{
	struct itg3200 *state = iio_priv(indio_dev);

	iio_trigger_unregister(state->trig);
	free_irq(state->i2c->irq, state->trig);
	iio_trigger_free(state->trig);
}
| gpl-2.0 |
SaberMod/android_kernel_moto_shamu | drivers/media/platform/msm/dvb/demux/mpq_sdmx.c | 1372 | 25542 | /* Copyright (c) 2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include "qseecom_kernel.h"
#include "mpq_sdmx.h"
static struct qseecom_handle *sdmx_qseecom_handles[SDMX_MAX_SESSIONS];
static struct mutex sdmx_lock[SDMX_MAX_SESSIONS];
#define QSEECOM_ALIGN_SIZE 0x40
#define QSEECOM_ALIGN_MASK (QSEECOM_ALIGN_SIZE - 1)
#define QSEECOM_ALIGN(x) \
((x + QSEECOM_ALIGN_SIZE) & (~QSEECOM_ALIGN_MASK))
/*
 * Command identifiers placed at the start of every request sent to the
 * secure demux application via qseecom_send_command().
 * NOTE(review): the numeric values are presumably part of the TZ-side
 * command ABI — confirm before reordering or inserting entries.
 */
enum sdmx_cmd_id {
	SDMX_OPEN_SESSION_CMD,
	SDMX_CLOSE_SESSION_CMD,
	SDMX_SET_SESSION_CFG_CMD,
	SDMX_ADD_FILTER_CMD,
	SDMX_REMOVE_FILTER_CMD,
	SDMX_SET_KL_IDX_CMD,
	SDMX_ADD_RAW_PID_CMD,
	SDMX_REMOVE_RAW_PID_CMD,
	SDMX_PROCESS_CMD,
	SDMX_GET_DBG_COUNTERS_CMD,
	SDMX_RESET_DBG_COUNTERS_CMD,
	SDMX_GET_VERSION_CMD,
	SDMX_INVALIDATE_KL_CMD,
	SDMX_SET_LOG_LEVEL_CMD
};
/*
 * Request/response message layouts exchanged with the secure demux TZ
 * application through the QSEECom shared buffer.  Every request begins
 * with the enum sdmx_cmd_id selecting the operation.
 * NOTE(review): these layouts are presumably a fixed wire ABI shared
 * with the TZ side — do not change field order or sizes.
 */

/* SDMX_PROCESS_CMD request; trailed by num_filters filter statuses. */
struct sdmx_proc_req {
	enum sdmx_cmd_id cmd_id;
	u32 session_handle;
	u8 flags;
	struct sdmx_buff_descr in_buf_descr;
	u32 inp_fill_cnt;
	u32 in_rd_offset;
	u32 num_filters;
	struct sdmx_filter_status filters_status[];
};

/* SDMX_PROCESS_CMD response: updated input-buffer state + indicators. */
struct sdmx_proc_rsp {
	enum sdmx_status ret;
	u32 inp_fill_cnt;
	u32 in_rd_offset;
	u32 err_indicators;
	u32 status_indicators;
};

/* SDMX_OPEN_SESSION_CMD request (no payload beyond the command id). */
struct sdmx_open_ses_req {
	enum sdmx_cmd_id cmd_id;
};

/* SDMX_OPEN_SESSION_CMD response: the new session's handle. */
struct sdmx_open_ses_rsp {
	enum sdmx_status ret;
	u32 session_handle;
};

/* SDMX_CLOSE_SESSION_CMD request. */
struct sdmx_close_ses_req {
	enum sdmx_cmd_id cmd_id;
	u32 session_handle;
};

struct sdmx_close_ses_rsp {
	enum sdmx_status ret;
};

/* SDMX_SET_SESSION_CFG_CMD request: session-wide processing options. */
struct sdmx_ses_cfg_req {
	enum sdmx_cmd_id cmd_id;
	u32 session_handle;
	enum sdmx_proc_mode process_mode;
	enum sdmx_inp_mode input_mode;
	enum sdmx_pkt_format packet_len;
	u8 odd_scramble_bits;
	u8 even_scramble_bits;
};

struct sdmx_ses_cfg_rsp {
	enum sdmx_status ret;
};

/* SDMX_SET_KL_IDX_CMD request: bind a key-ladder index to a pid. */
struct sdmx_set_kl_ind_req {
	enum sdmx_cmd_id cmd_id;
	u32 session_handle;
	u32 pid;
	u32 kl_index;
};

struct sdmx_set_kl_ind_rsp {
	enum sdmx_status ret;
};

/* SDMX_ADD_FILTER_CMD request; trailed by num_data_bufs descriptors. */
struct sdmx_add_filt_req {
	enum sdmx_cmd_id cmd_id;
	u32 session_handle;
	u32 pid;
	enum sdmx_filter filter_type;
	struct sdmx_buff_descr meta_data_buf;
	enum sdmx_buf_mode buffer_mode;
	enum sdmx_raw_out_format ts_out_format;
	u32 flags;
	u32 num_data_bufs;
	struct sdmx_data_buff_descr data_bufs[];
};

/* SDMX_ADD_FILTER_CMD response: the new filter's handle. */
struct sdmx_add_filt_rsp {
	enum sdmx_status ret;
	u32 filter_handle;
};

/* SDMX_REMOVE_FILTER_CMD request. */
struct sdmx_rem_filt_req {
	enum sdmx_cmd_id cmd_id;
	u32 session_handle;
	u32 filter_handle;
};

struct sdmx_rem_filt_rsp {
	enum sdmx_status ret;
};

/* SDMX_ADD_RAW_PID_CMD request: add a pid to a raw filter. */
struct sdmx_add_raw_req {
	enum sdmx_cmd_id cmd_id;
	u32 session_handle;
	u32 filter_handle;
	u32 pid;
};

struct sdmx_add_raw_rsp {
	enum sdmx_status ret;
};

/* SDMX_REMOVE_RAW_PID_CMD request: drop a pid from a raw filter. */
struct sdmx_rem_raw_req {
	enum sdmx_cmd_id cmd_id;
	u32 session_handle;
	u32 filter_handle;
	u32 pid;
};

struct sdmx_rem_raw_rsp {
	enum sdmx_status ret;
};

/* SDMX_GET_DBG_COUNTERS_CMD request: num_filters caps the reply size. */
struct sdmx_get_counters_req {
	enum sdmx_cmd_id cmd_id;
	u32 session_handle;
	u32 num_filters;
};

/* SDMX_GET_DBG_COUNTERS_CMD response; trailed by per-filter counters. */
struct sdmx_get_counters_rsp {
	enum sdmx_status ret;
	struct sdmx_session_dbg_counters session_counters;
	u32 num_filters;
	struct sdmx_filter_dbg_counters filter_counters[];
};

/* SDMX_RESET_DBG_COUNTERS_CMD request. */
struct sdmx_rst_counters_req {
	enum sdmx_cmd_id cmd_id;
	u32 session_handle;
};

struct sdmx_rst_counters_rsp {
	enum sdmx_status ret;
};

/* SDMX_GET_VERSION_CMD request (no payload beyond the command id). */
struct sdmx_get_version_req {
	enum sdmx_cmd_id cmd_id;
};

struct sdmx_get_version_rsp {
	enum sdmx_status ret;
	int32_t version;
};

/* SDMX_SET_LOG_LEVEL_CMD request: TZ-side log verbosity. */
struct sdmx_set_log_level_req {
	enum sdmx_cmd_id cmd_id;
	enum sdmx_log_level level;
	u32 session_handle;
};

struct sdmx_set_log_level_rsp {
	enum sdmx_status ret;
};
/*
 * Carve the command and response areas out of the session's QSEECom
 * shared buffer: the command sits at the start, the response follows at
 * the next QSEECOM_ALIGN_SIZE boundary.  Both lengths are rounded up in
 * place so callers pass aligned sizes to qseecom_send_command().
 * Caller must hold sdmx_lock[handle_index].
 */
static void get_cmd_rsp_buffers(int handle_index,
	void **cmd,
	int *cmd_len,
	void **rsp,
	int *rsp_len)
{
	if (*cmd_len & QSEECOM_ALIGN_MASK)
		*cmd_len = QSEECOM_ALIGN(*cmd_len);

	if (*rsp_len & QSEECOM_ALIGN_MASK)
		*rsp_len = QSEECOM_ALIGN(*rsp_len);

	*cmd = sdmx_qseecom_handles[handle_index]->sbuf;
	*rsp = sdmx_qseecom_handles[handle_index]->sbuf + *cmd_len;
}
/*
 * Query the version of the secure demux TZ application.
 *
 * @session_handle: handle of an open secure demux instance.
 * @version: out parameter receiving the version. Must not be NULL.
 *
 * Return error code
 */
int sdmx_get_version(int session_handle, int32_t *version)
{
	int res, cmd_len, rsp_len;
	struct sdmx_get_version_req *cmd;
	struct sdmx_get_version_rsp *rsp;
	enum sdmx_status ret = SDMX_STATUS_GENERAL_FAILURE;

	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
	    (version == NULL))
		return SDMX_STATUS_INVALID_INPUT_PARAMS;

	cmd_len = sizeof(struct sdmx_get_version_req);
	rsp_len = sizeof(struct sdmx_get_version_rsp);

	/* Serialize access to the session's shared command buffer. */
	mutex_lock(&sdmx_lock[session_handle]);
	get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
		(void **)&rsp, &rsp_len);

	cmd->cmd_id = SDMX_GET_VERSION_CMD;

	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
	if (res >= 0) {
		ret = rsp->ret;
		*version = rsp->version;
	}

	mutex_unlock(&sdmx_lock[session_handle]);
	return ret;
}
EXPORT_SYMBOL(sdmx_get_version);
/*
* Initializes a new secure demux instance and returns a handle of the instance.
*
* @session_handle: handle of a secure demux instance to get its version.
* Return the version if successfull or an error code.
*/
int sdmx_open_session(int *session_handle)
{
int res, cmd_len, rsp_len;
enum sdmx_status ret, version_ret;
struct sdmx_open_ses_req *cmd;
struct sdmx_open_ses_rsp *rsp;
struct qseecom_handle *qseecom_handle = NULL;
int32_t version;
/* Input validation */
if (session_handle == NULL)
return SDMX_STATUS_GENERAL_FAILURE;
/* Start the TZ app */
res = qseecom_start_app(&qseecom_handle, "securemm", 4096);
if (res < 0)
return SDMX_STATUS_GENERAL_FAILURE;
cmd_len = sizeof(struct sdmx_open_ses_req);
rsp_len = sizeof(struct sdmx_open_ses_rsp);
/* Get command and response buffers */
cmd = (struct sdmx_open_ses_req *)qseecom_handle->sbuf;
if (cmd_len & QSEECOM_ALIGN_MASK)
cmd_len = QSEECOM_ALIGN(cmd_len);
rsp = (struct sdmx_open_ses_rsp *)qseecom_handle->sbuf + cmd_len;
if (rsp_len & QSEECOM_ALIGN_MASK)
rsp_len = QSEECOM_ALIGN(rsp_len);
/* Will be later overridden by SDMX response */
*session_handle = SDMX_INVALID_SESSION_HANDLE;
/* Populate command struct */
cmd->cmd_id = SDMX_OPEN_SESSION_CMD;
/* Issue QSEECom command */
res = qseecom_send_command(qseecom_handle, (void *)cmd, cmd_len,
(void *)rsp, rsp_len);
if (res < 0) {
qseecom_shutdown_app(&qseecom_handle);
return SDMX_STATUS_GENERAL_FAILURE;
}
/* Parse response struct */
*session_handle = rsp->session_handle;
/* Initialize handle and mutex */
sdmx_qseecom_handles[*session_handle] = qseecom_handle;
mutex_init(&sdmx_lock[*session_handle]);
ret = rsp->ret;
/* Get and print the app version */
version_ret = sdmx_get_version(*session_handle, &version);
if (SDMX_SUCCESS == version_ret)
pr_info("TZ SDMX version is %x.%x\n", version >> 8,
version & 0xFF);
else
pr_err("Error reading TZ SDMX version\n");
return ret;
}
EXPORT_SYMBOL(sdmx_open_session);
/*
* Closes a secure demux instance.
*
* @session_handle: handle of a secure demux instance to close.
* Return error code
*/
int sdmx_close_session(int session_handle)
{
int res, cmd_len, rsp_len;
struct sdmx_close_ses_req *cmd;
struct sdmx_close_ses_rsp *rsp;
enum sdmx_status ret;
if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
return SDMX_STATUS_INVALID_INPUT_PARAMS;
cmd_len = sizeof(struct sdmx_close_ses_req);
rsp_len = sizeof(struct sdmx_close_ses_rsp);
/* Lock shared memory */
mutex_lock(&sdmx_lock[session_handle]);
/* Get command and response buffers */
get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
(void **)&rsp, &rsp_len);
/* Populate command struct */
cmd->cmd_id = SDMX_CLOSE_SESSION_CMD;
cmd->session_handle = session_handle;
/* Issue QSEECom command */
res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
(void *)cmd, cmd_len, (void *)rsp, rsp_len);
if (res < 0) {
mutex_unlock(&sdmx_lock[session_handle]);
return SDMX_STATUS_GENERAL_FAILURE;
}
ret = rsp->ret;
/* Shutdown the TZ app (or at least free the current handle) */
res = qseecom_shutdown_app(&sdmx_qseecom_handles[session_handle]);
if (res < 0) {
mutex_unlock(&sdmx_lock[session_handle]);
return SDMX_STATUS_GENERAL_FAILURE;
}
sdmx_qseecom_handles[session_handle] = NULL;
mutex_unlock(&sdmx_lock[session_handle]);
return ret;
}
EXPORT_SYMBOL(sdmx_close_session);
/*
 * Configure an open secure demux instance.
 *
 * @session_handle: secure demux instance
 * @proc_mode: behavior on output buffer overflow
 * @inp_mode: input encryption settings
 * @pkt_format: TS packet length in the input buffer
 * @odd_scramble_bits: scramble-bits value indicating the ODD key
 * @even_scramble_bits: scramble-bits value indicating the EVEN key
 *
 * Return error code
 */
int sdmx_set_session_cfg(int session_handle,
	enum sdmx_proc_mode proc_mode,
	enum sdmx_inp_mode inp_mode,
	enum sdmx_pkt_format pkt_format,
	u8 odd_scramble_bits,
	u8 even_scramble_bits)
{
	int res, cmd_len, rsp_len;
	struct sdmx_ses_cfg_req *cmd;
	struct sdmx_ses_cfg_rsp *rsp;
	enum sdmx_status ret = SDMX_STATUS_GENERAL_FAILURE;

	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
		return SDMX_STATUS_INVALID_INPUT_PARAMS;

	cmd_len = sizeof(struct sdmx_ses_cfg_req);
	rsp_len = sizeof(struct sdmx_ses_cfg_rsp);

	/* Serialize access to the session's shared command buffer. */
	mutex_lock(&sdmx_lock[session_handle]);
	get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
		(void **)&rsp, &rsp_len);

	cmd->cmd_id = SDMX_SET_SESSION_CFG_CMD;
	cmd->session_handle = session_handle;
	cmd->process_mode = proc_mode;
	cmd->input_mode = inp_mode;
	cmd->packet_len = pkt_format;
	cmd->odd_scramble_bits = odd_scramble_bits;
	cmd->even_scramble_bits = even_scramble_bits;

	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
	if (res >= 0)
		ret = rsp->ret;

	mutex_unlock(&sdmx_lock[session_handle]);
	return ret;
}
EXPORT_SYMBOL(sdmx_set_session_cfg);
/*
 * Create a new secure demux filter and return its handle.
 *
 * @session_handle: secure demux instance
 * @pid: pid to filter
 * @filterype: type of filtering
 * @meta_data_buf: meta data buffer descriptor (may be NULL)
 * @d_buf_mode: data buffer mode (ring/linear)
 * @num_data_bufs: number of data buffers (use 1 for a ring buffer)
 * @data_bufs: data buffers descriptors array
 * @filter_handle: returned filter handle
 * @ts_out_format: output format for raw filters
 * @flags: optional flags for filter
 * (currently only clear section CRC verification is supported)
 *
 * Return error code
 */
int sdmx_add_filter(int session_handle,
	u16 pid,
	enum sdmx_filter filterype,
	struct sdmx_buff_descr *meta_data_buf,
	enum sdmx_buf_mode d_buf_mode,
	u32 num_data_bufs,
	struct sdmx_data_buff_descr *data_bufs,
	int *filter_handle,
	enum sdmx_raw_out_format ts_out_format,
	u32 flags)
{
	int res, cmd_len, rsp_len;
	struct sdmx_add_filt_req *cmd;
	struct sdmx_add_filt_rsp *rsp;
	enum sdmx_status ret = SDMX_STATUS_GENERAL_FAILURE;

	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
	    (filter_handle == NULL))
		return SDMX_STATUS_INVALID_INPUT_PARAMS;

	/* The request is trailed by the variable-length buffer list. */
	cmd_len = sizeof(struct sdmx_add_filt_req)
		+ num_data_bufs * sizeof(struct sdmx_data_buff_descr);
	rsp_len = sizeof(struct sdmx_add_filt_rsp);

	/* Overridden below on a successful SDMX response. */
	*filter_handle = SDMX_INVALID_FILTER_HANDLE;

	/* Serialize access to the session's shared command buffer. */
	mutex_lock(&sdmx_lock[session_handle]);
	get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
		(void **)&rsp, &rsp_len);

	cmd->cmd_id = SDMX_ADD_FILTER_CMD;
	cmd->session_handle = session_handle;
	cmd->pid = (u32)pid;
	cmd->filter_type = filterype;
	cmd->ts_out_format = ts_out_format;
	cmd->flags = flags;
	if (meta_data_buf != NULL)
		memcpy(&(cmd->meta_data_buf), meta_data_buf,
			sizeof(struct sdmx_buff_descr));
	else
		memset(&(cmd->meta_data_buf), 0,
			sizeof(struct sdmx_buff_descr));
	cmd->buffer_mode = d_buf_mode;
	cmd->num_data_bufs = num_data_bufs;
	memcpy(cmd->data_bufs, data_bufs,
		num_data_bufs * sizeof(struct sdmx_data_buff_descr));

	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
	if (res >= 0) {
		*filter_handle = rsp->filter_handle;
		ret = rsp->ret;
	}

	mutex_unlock(&sdmx_lock[session_handle]);
	return ret;
}
EXPORT_SYMBOL(sdmx_add_filter);
/*
* Removes a secure demux filter
*
* @session_handle: secure demux instance
* @filter_handle: filter handle to remove
*
* Return error code
*/
int sdmx_remove_filter(int session_handle, int filter_handle)
{
int res, cmd_len, rsp_len;
struct sdmx_rem_filt_req *cmd;
struct sdmx_rem_filt_rsp *rsp;
enum sdmx_status ret;
if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
return SDMX_STATUS_INVALID_INPUT_PARAMS;
cmd_len = sizeof(struct sdmx_rem_filt_req);
rsp_len = sizeof(struct sdmx_rem_filt_rsp);
/* Lock shared memory */
mutex_lock(&sdmx_lock[session_handle]);
/* Get command and response buffers */
get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
(void **)&rsp, &rsp_len);
/* Populate command struct */
cmd->cmd_id = SDMX_REMOVE_FILTER_CMD;
cmd->session_handle = session_handle;
cmd->filter_handle = filter_handle;
/* Issue QSEECom command */
res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
(void *)cmd, cmd_len, (void *)rsp, rsp_len);
if (res < 0) {
mutex_unlock(&sdmx_lock[session_handle]);
return SDMX_STATUS_GENERAL_FAILURE;
}
ret = rsp->ret;
mutex_unlock(&sdmx_lock[session_handle]);
return ret;
}
EXPORT_SYMBOL(sdmx_remove_filter);
/*
 * Associate a key ladder index with the specified pid.
 *
 * @session_handle: secure demux instance
 * @pid: pid
 * @key_ladder_index: key ladder index to associate to the pid
 *
 * Return error code
 *
 * Note: if the pid already has a key ladder index associated, it is
 * overridden.
 */
int sdmx_set_kl_ind(int session_handle, u16 pid, u32 key_ladder_index)
{
	int res, cmd_len, rsp_len;
	struct sdmx_set_kl_ind_req *cmd;
	struct sdmx_set_kl_ind_rsp *rsp;
	enum sdmx_status ret = SDMX_STATUS_GENERAL_FAILURE;

	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
		return SDMX_STATUS_INVALID_INPUT_PARAMS;

	cmd_len = sizeof(struct sdmx_set_kl_ind_req);
	rsp_len = sizeof(struct sdmx_set_kl_ind_rsp);

	/* Serialize access to the session's shared command buffer. */
	mutex_lock(&sdmx_lock[session_handle]);
	get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
		(void **)&rsp, &rsp_len);

	cmd->cmd_id = SDMX_SET_KL_IDX_CMD;
	cmd->session_handle = session_handle;
	cmd->pid = (u32)pid;
	cmd->kl_index = key_ladder_index;

	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
	if (res >= 0)
		ret = rsp->ret;

	mutex_unlock(&sdmx_lock[session_handle]);
	return ret;
}
EXPORT_SYMBOL(sdmx_set_kl_ind);
/*
 * Add the specified pid to an existing raw (recording) filter.
 *
 * @session_handle: secure demux instance
 * @filter_handle: raw filter handle
 * @pid: pid
 *
 * Return error code
 */
int sdmx_add_raw_pid(int session_handle, int filter_handle, u16 pid)
{
	int res, cmd_len, rsp_len;
	struct sdmx_add_raw_req *cmd;
	struct sdmx_add_raw_rsp *rsp;
	enum sdmx_status ret = SDMX_STATUS_GENERAL_FAILURE;

	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
		return SDMX_STATUS_INVALID_INPUT_PARAMS;

	cmd_len = sizeof(struct sdmx_add_raw_req);
	rsp_len = sizeof(struct sdmx_add_raw_rsp);

	/* Serialize access to the session's shared command buffer. */
	mutex_lock(&sdmx_lock[session_handle]);
	get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
		(void **)&rsp, &rsp_len);

	cmd->cmd_id = SDMX_ADD_RAW_PID_CMD;
	cmd->session_handle = session_handle;
	cmd->filter_handle = filter_handle;
	cmd->pid = (u32)pid;

	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
	if (res >= 0)
		ret = rsp->ret;

	mutex_unlock(&sdmx_lock[session_handle]);
	return ret;
}
EXPORT_SYMBOL(sdmx_add_raw_pid);
/*
 * Remove the specified pid from a raw (recording) filter.
 *
 * @session_handle: secure demux instance
 * @filter_handle: raw filter handle
 * @pid: pid
 *
 * Return error code
 */
int sdmx_remove_raw_pid(int session_handle, int filter_handle, u16 pid)
{
	int res, cmd_len, rsp_len;
	struct sdmx_rem_raw_req *cmd;
	struct sdmx_rem_raw_rsp *rsp;
	enum sdmx_status ret = SDMX_STATUS_GENERAL_FAILURE;

	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
		return SDMX_STATUS_INVALID_INPUT_PARAMS;

	cmd_len = sizeof(struct sdmx_rem_raw_req);
	rsp_len = sizeof(struct sdmx_rem_raw_rsp);

	/* Serialize access to the session's shared command buffer. */
	mutex_lock(&sdmx_lock[session_handle]);
	get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
		(void **)&rsp, &rsp_len);

	cmd->cmd_id = SDMX_REMOVE_RAW_PID_CMD;
	cmd->session_handle = session_handle;
	cmd->filter_handle = filter_handle;
	cmd->pid = (u32)pid;

	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
	if (res >= 0)
		ret = rsp->ret;

	mutex_unlock(&sdmx_lock[session_handle]);
	return ret;
}
EXPORT_SYMBOL(sdmx_remove_raw_pid);
/*
 * Run secure demux processing on the specified input buffer.
 *
 * @session_handle: secure demux instance
 * @flags: input flags. Currently only EOS marking is supported.
 * @input_buf_desc: input buffer descriptor
 * @input_fill_count: in/out - bytes available in the input buffer
 * @input_read_offset: in/out - offset inside input buffer where data starts
 * @error_indicators: returned general error indicators
 * @status_indicators: returned general status indicators
 * @num_filters: number of filters in the filter status array
 * @filter_status: in/out filter status descriptor array
 *
 * Return error code
 */
int sdmx_process(int session_handle, u8 flags,
	struct sdmx_buff_descr *input_buf_desc,
	u32 *input_fill_count,
	u32 *input_read_offset,
	u32 *error_indicators,
	u32 *status_indicators,
	u32 num_filters,
	struct sdmx_filter_status *filter_status)
{
	int res, cmd_len, rsp_len;
	struct sdmx_proc_req *cmd;
	struct sdmx_proc_rsp *rsp;
	enum sdmx_status ret = SDMX_STATUS_GENERAL_FAILURE;

	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
	    (input_buf_desc == NULL) ||
	    (input_fill_count == NULL) || (input_read_offset == NULL) ||
	    (error_indicators == NULL) || (status_indicators == NULL) ||
	    (filter_status == NULL))
		return SDMX_STATUS_INVALID_INPUT_PARAMS;

	/* The request is trailed by the per-filter status array. */
	cmd_len = sizeof(struct sdmx_proc_req)
		+ num_filters * sizeof(struct sdmx_filter_status);
	rsp_len = sizeof(struct sdmx_proc_rsp);

	/* Serialize access to the session's shared command buffer. */
	mutex_lock(&sdmx_lock[session_handle]);
	get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
		(void **)&rsp, &rsp_len);

	cmd->cmd_id = SDMX_PROCESS_CMD;
	cmd->session_handle = session_handle;
	cmd->flags = flags;
	cmd->in_buf_descr.base_addr = input_buf_desc->base_addr;
	cmd->in_buf_descr.size = input_buf_desc->size;
	cmd->inp_fill_cnt = *input_fill_count;
	cmd->in_rd_offset = *input_read_offset;
	cmd->num_filters = num_filters;
	memcpy(cmd->filters_status, filter_status,
		num_filters * sizeof(struct sdmx_filter_status));

	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
	if (res >= 0) {
		/* Copy back updated input state and filter statuses. */
		*input_fill_count = rsp->inp_fill_cnt;
		*input_read_offset = rsp->in_rd_offset;
		*error_indicators = rsp->err_indicators;
		*status_indicators = rsp->status_indicators;
		memcpy(filter_status, cmd->filters_status,
			num_filters * sizeof(struct sdmx_filter_status));
		ret = rsp->ret;
	}

	mutex_unlock(&sdmx_lock[session_handle]);
	return ret;
}
EXPORT_SYMBOL(sdmx_process);
/*
 * Fetch session-level and filter-level debug counters.
 *
 * @session_handle: secure demux instance
 * @session_counters: returned session-level debug counters
 * @num_filters: in - capacity of filter_counters;
 *               out - number of filters actually reported
 * @filter_counters: returned filter-level debug counters array
 *
 * Return error code
 */
int sdmx_get_dbg_counters(int session_handle,
	struct sdmx_session_dbg_counters *session_counters,
	u32 *num_filters,
	struct sdmx_filter_dbg_counters *filter_counters)
{
	int res, cmd_len, rsp_len;
	struct sdmx_get_counters_req *cmd;
	struct sdmx_get_counters_rsp *rsp;
	enum sdmx_status ret = SDMX_STATUS_GENERAL_FAILURE;

	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
	    (session_counters == NULL) || (num_filters == NULL) ||
	    (filter_counters == NULL))
		return SDMX_STATUS_INVALID_INPUT_PARAMS;

	cmd_len = sizeof(struct sdmx_get_counters_req);
	/* The response is trailed by the per-filter counter array. */
	rsp_len = sizeof(struct sdmx_get_counters_rsp)
		+ *num_filters * sizeof(struct sdmx_filter_dbg_counters);

	/* Serialize access to the session's shared command buffer. */
	mutex_lock(&sdmx_lock[session_handle]);
	get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
		(void **)&rsp, &rsp_len);

	cmd->cmd_id = SDMX_GET_DBG_COUNTERS_CMD;
	cmd->session_handle = session_handle;
	cmd->num_filters = *num_filters;

	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
	if (res >= 0) {
		*session_counters = rsp->session_counters;
		*num_filters = rsp->num_filters;
		memcpy(filter_counters, rsp->filter_counters,
			*num_filters * sizeof(struct sdmx_filter_dbg_counters));
		ret = rsp->ret;
	}

	mutex_unlock(&sdmx_lock[session_handle]);
	return ret;
}
EXPORT_SYMBOL(sdmx_get_dbg_counters);
/*
* Reset debug counters
*
* @session_handle: secure demux instance
*
* Return error code
*/
/* Ask the secure demux to zero its per-session debug counters. */
int sdmx_reset_dbg_counters(int session_handle)
{
	struct sdmx_rst_counters_req *cmd;
	struct sdmx_rst_counters_rsp *rsp;
	enum sdmx_status ret;
	int res;
	int cmd_len = sizeof(*cmd);
	int rsp_len = sizeof(*rsp);

	if (session_handle < 0 || session_handle >= SDMX_MAX_SESSIONS)
		return SDMX_STATUS_INVALID_INPUT_PARAMS;

	/* The shared command/response memory is guarded per session. */
	mutex_lock(&sdmx_lock[session_handle]);
	get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
		(void **)&rsp, &rsp_len);

	/* Build the reset request. */
	cmd->cmd_id = SDMX_RESET_DBG_COUNTERS_CMD;
	cmd->session_handle = session_handle;

	/* Hand the request over to the secure side. */
	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
	if (res < 0) {
		mutex_unlock(&sdmx_lock[session_handle]);
		return SDMX_STATUS_GENERAL_FAILURE;
	}

	ret = rsp->ret;
	mutex_unlock(&sdmx_lock[session_handle]);
	return ret;
}
EXPORT_SYMBOL(sdmx_reset_dbg_counters);
/*
* Set debug log verbosity level
*
* @session_handle: secure demux instance
* @level: requested log level
*
* Return error code
*/
int sdmx_set_log_level(int session_handle, enum sdmx_log_level level)
{
int res, cmd_len, rsp_len;
struct sdmx_set_log_level_req *cmd;
struct sdmx_set_log_level_rsp *rsp;
enum sdmx_status ret;
cmd_len = sizeof(struct sdmx_set_log_level_req);
rsp_len = sizeof(struct sdmx_set_log_level_rsp);
/* Get command and response buffers */
get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
(void **)&rsp, &rsp_len);
/* Lock shared memory */
mutex_lock(&sdmx_lock[session_handle]);
/* Populate command struct */
cmd->cmd_id = SDMX_SET_LOG_LEVEL_CMD;
cmd->session_handle = session_handle;
cmd->level = level;
/* Issue QSEECom command */
res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
(void *)cmd, cmd_len, (void *)rsp, rsp_len);
if (res < 0) {
mutex_unlock(&sdmx_lock[session_handle]);
return SDMX_STATUS_GENERAL_FAILURE;
}
ret = rsp->ret;
/* Unlock */
mutex_unlock(&sdmx_lock[session_handle]);
return ret;
}
| gpl-2.0 |
ffolkes/plasmakernel_note4_tw_lp | drivers/net/ethernet/amd/pcnet32.c | 1628 | 81888 | /* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
/*
* Copyright 1996-1999 Thomas Bogendoerfer
*
* Derived from the lance driver written 1993,1994,1995 by Donald Becker.
*
* Copyright 1993 United States Government as represented by the
* Director, National Security Agency.
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* This driver is for PCnet32 and PCnetPCI based ethercards
*/
/**************************************************************************
* 23 Oct, 2000.
* Fixed a few bugs, related to running the controller in 32bit mode.
*
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*
*************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "pcnet32"
#define DRV_VERSION "1.35"
#define DRV_RELDATE "21.Apr.2008"
#define PFX DRV_NAME ": "
static const char *const version =
DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <asm/irq.h>
/*
* PCI device identifiers for "new style" Linux PCI Device Drivers
*/
/* PCI vendor/device IDs this driver binds to; exported via
 * MODULE_DEVICE_TABLE so udev/modprobe can autoload the module. */
static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
/*
* Adapters that were sold with IBM's RS/6000 or pSeries hardware have
* the incorrect vendor id.
*/
{ PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
.class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },
{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);
/* Number of adapters successfully probed so far. */
static int cards_found;
/*
* VLB I/O addresses
*/
static unsigned int pcnet32_portlist[] =
{ 0x300, 0x320, 0x340, 0x360, 0 };
/* Driver-wide default debug/message level (0 = quiet). */
static int pcnet32_debug;
static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
static int pcnet32vlb; /* check for VLB cards ? */
/* Head of the linked list of all probed devices (see lp->next). */
static struct net_device *pcnet32_dev;
/* Max events handled per interrupt before deferring to NAPI/next IRQ. */
static int max_interrupt_work = 2;
/* Packets shorter than this are copied into a fresh skb on receive. */
static int rx_copybreak = 200;
#define PCNET32_PORT_AUI 0x00
#define PCNET32_PORT_10BT 0x01
#define PCNET32_PORT_GPSI 0x02
#define PCNET32_PORT_MII 0x03
#define PCNET32_PORT_PORTSEL 0x03
#define PCNET32_PORT_ASEL 0x04
#define PCNET32_PORT_100 0x40
#define PCNET32_PORT_FD 0x80
#define PCNET32_DMA_MASK 0xffffffff
#define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ))
#define PCNET32_BLINK_TIMEOUT (jiffies + (HZ/4))
/*
* table to translate option values from tulip
* to internal options
*/
/* Indexed by the tulip-style "options" module parameter value;
 * yields the PCNET32_PORT_* bits used internally. */
static const unsigned char options_mapping[] = {
PCNET32_PORT_ASEL, /* 0 Auto-select */
PCNET32_PORT_AUI, /* 1 BNC/AUI */
PCNET32_PORT_AUI, /* 2 AUI/BNC */
PCNET32_PORT_ASEL, /* 3 not supported */
PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */
PCNET32_PORT_ASEL, /* 5 not supported */
PCNET32_PORT_ASEL, /* 6 not supported */
PCNET32_PORT_ASEL, /* 7 not supported */
PCNET32_PORT_ASEL, /* 8 not supported */
PCNET32_PORT_MII, /* 9 MII 10baseT */
PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */
PCNET32_PORT_MII, /* 11 MII (autosel) */
PCNET32_PORT_10BT, /* 12 10BaseT */
PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */
/* 14 MII 100BaseTx-FD */
PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD,
PCNET32_PORT_ASEL /* 15 not supported */
};
/* Names reported to "ethtool -t"; one entry per self-test. */
static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = {
"Loopback test (offline)"
};
#define PCNET32_TEST_LEN ARRAY_SIZE(pcnet32_gstrings_test)
#define PCNET32_NUM_REGS 136
#define MAX_UNITS 8 /* More are supported, limit only on options */
/* Per-unit module parameters (one slot per probed card). */
static int options[MAX_UNITS];
static int full_duplex[MAX_UNITS];
static int homepna[MAX_UNITS];
/*
* Theory of Operation
*
* This driver uses the same software structure as the normal lance
* driver. So look for a verbose description in lance.c. The differences
* to the normal lance driver is the use of the 32bit mode of PCnet32
* and PCnetPCI chips. Because these chips are 32bit chips, there is no
* 16MB limitation and we don't need bounce buffers.
*/
/*
* Set the number of Tx and Rx buffers, using Log_2(# buffers).
* Reasonable default values are 4 Tx buffers, and 16 Rx buffers.
* That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4).
*/
#ifndef PCNET32_LOG_TX_BUFFERS
#define PCNET32_LOG_TX_BUFFERS 4
#define PCNET32_LOG_RX_BUFFERS 5
#define PCNET32_LOG_MAX_TX_BUFFERS 9 /* 2^9 == 512 */
#define PCNET32_LOG_MAX_RX_BUFFERS 9
#endif
#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS))
#define TX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_TX_BUFFERS))
#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS))
#define PKT_BUF_SKB 1544
/* actual buffer length after being aligned */
#define PKT_BUF_SIZE (PKT_BUF_SKB - NET_IP_ALIGN)
/* chip wants twos complement of the (aligned) buffer length */
#define NEG_BUF_SIZE (NET_IP_ALIGN - PKT_BUF_SKB)
/* Offsets from base I/O address. */
#define PCNET32_WIO_RDP 0x10
#define PCNET32_WIO_RAP 0x12
#define PCNET32_WIO_RESET 0x14
#define PCNET32_WIO_BDP 0x16
#define PCNET32_DWIO_RDP 0x10
#define PCNET32_DWIO_RAP 0x14
#define PCNET32_DWIO_RESET 0x18
#define PCNET32_DWIO_BDP 0x1C
#define PCNET32_TOTAL_SIZE 0x20
#define CSR0 0
#define CSR0_INIT 0x1
#define CSR0_START 0x2
#define CSR0_STOP 0x4
#define CSR0_TXPOLL 0x8
#define CSR0_INTEN 0x40
#define CSR0_IDON 0x0100
#define CSR0_NORMAL (CSR0_START | CSR0_INTEN)
#define PCNET32_INIT_LOW 1
#define PCNET32_INIT_HIGH 2
#define CSR3 3
#define CSR4 4
#define CSR5 5
#define CSR5_SUSPEND 0x0001
#define CSR15 15
#define PCNET32_MC_FILTER 8
#define PCNET32_79C970A 0x2621
/* The PCNET32 Rx and Tx ring descriptors. */
/* Layout is dictated by the hardware (32-bit software style); all
 * multi-byte fields are little-endian as seen by the chip. */
struct pcnet32_rx_head {
__le32 base;
__le16 buf_length; /* two`s complement of length */
__le16 status;
__le32 msg_length;
__le32 reserved;
};
struct pcnet32_tx_head {
__le32 base;
__le16 length; /* two`s complement of length */
__le16 status;
__le32 misc;
__le32 reserved;
};
/* The PCNET32 32-Bit initialization block, described in databook. */
struct pcnet32_init_block {
__le16 mode;
/* Encodes log2 of tx/rx ring sizes (see tx_len_bits/rx_len_bits). */
__le16 tlen_rlen;
u8 phys_addr[6];
__le16 reserved;
/* Logical-address (multicast) filter, 64 bits. */
__le32 filter[2];
/* Receive and transmit ring base, along with extra bits. */
__le32 rx_ring;
__le32 tx_ring;
};
/* PCnet32 access functions */
/* Register-access vtable: one instance for 16-bit word I/O (WIO) and
 * one for 32-bit dword I/O (DWIO); all take the card's base I/O port. */
struct pcnet32_access {
u16 (*read_csr) (unsigned long, int);
void (*write_csr) (unsigned long, int, u16);
u16 (*read_bcr) (unsigned long, int);
void (*write_bcr) (unsigned long, int, u16);
u16 (*read_rap) (unsigned long);
void (*write_rap) (unsigned long, u16);
void (*reset) (unsigned long);
};
/*
* The first field of pcnet32_private is read by the ethernet device
* so the structure should be allocated using pci_alloc_consistent().
*/
struct pcnet32_private {
/* Must stay first: the init block is read by the device itself. */
struct pcnet32_init_block *init_block;
/* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
struct pcnet32_rx_head *rx_ring;
struct pcnet32_tx_head *tx_ring;
dma_addr_t init_dma_addr;/* DMA address of beginning of the init block,
returned by pci_alloc_consistent */
struct pci_dev *pci_dev;
const char *name;
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
struct sk_buff **tx_skbuff;
struct sk_buff **rx_skbuff;
/* Per-descriptor DMA mappings, parallel to the skbuff arrays. */
dma_addr_t *tx_dma_addr;
dma_addr_t *rx_dma_addr;
/* WIO or DWIO register accessors chosen at probe time. */
const struct pcnet32_access *a;
spinlock_t lock; /* Guard lock */
unsigned int cur_rx, cur_tx; /* The next free ring entry */
unsigned int rx_ring_size; /* current rx ring size */
unsigned int tx_ring_size; /* current tx ring size */
unsigned int rx_mod_mask; /* rx ring modular mask */
unsigned int tx_mod_mask; /* tx ring modular mask */
/* log2(ring size) pre-shifted for the init block's tlen_rlen field. */
unsigned short rx_len_bits;
unsigned short tx_len_bits;
dma_addr_t rx_ring_dma_addr;
dma_addr_t tx_ring_dma_addr;
unsigned int dirty_rx, /* ring entries to be freed. */
dirty_tx;
struct net_device *dev;
struct napi_struct napi;
char tx_full;
char phycount; /* number of phys found */
int options;
unsigned int shared_irq:1, /* shared irq possible */
dxsuflo:1, /* disable transmit stop on uflo */
mii:1; /* mii port available */
/* Next device in the module-wide list headed by pcnet32_dev. */
struct net_device *next;
struct mii_if_info mii_if;
struct timer_list watchdog_timer;
u32 msg_enable; /* debug message level */
/* each bit indicates an available PHY */
u32 phymask;
unsigned short chip_version; /* which variant this is */
/* saved registers during ethtool blink */
u16 save_regs[4];
};
static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
static int pcnet32_open(struct net_device *);
static int pcnet32_init_ring(struct net_device *);
static netdev_tx_t pcnet32_start_xmit(struct sk_buff *,
struct net_device *);
static void pcnet32_tx_timeout(struct net_device *dev);
static irqreturn_t pcnet32_interrupt(int, void *);
static int pcnet32_close(struct net_device *);
static struct net_device_stats *pcnet32_get_stats(struct net_device *);
static void pcnet32_load_multicast(struct net_device *dev);
static void pcnet32_set_multicast_list(struct net_device *);
static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
static void pcnet32_watchdog(struct net_device *);
static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
int val);
static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
static void pcnet32_ethtool_test(struct net_device *dev,
struct ethtool_test *eth_test, u64 * data);
static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1);
static int pcnet32_get_regs_len(struct net_device *dev);
static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *ptr);
static void pcnet32_purge_tx_ring(struct net_device *dev);
static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
static void pcnet32_free_ring(struct net_device *dev);
static void pcnet32_check_media(struct net_device *dev, int verbose);
/* 16-bit "word I/O" register accessors: select a register by writing
 * its index to the RAP port, then read/write the data port.  The
 * RAP-write + data-access pairs must not be reordered. */
static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
{
outw(index, addr + PCNET32_WIO_RAP);
return inw(addr + PCNET32_WIO_RDP);
}
static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
{
outw(index, addr + PCNET32_WIO_RAP);
outw(val, addr + PCNET32_WIO_RDP);
}
static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
{
outw(index, addr + PCNET32_WIO_RAP);
return inw(addr + PCNET32_WIO_BDP);
}
static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
{
outw(index, addr + PCNET32_WIO_RAP);
outw(val, addr + PCNET32_WIO_BDP);
}
static u16 pcnet32_wio_read_rap(unsigned long addr)
{
return inw(addr + PCNET32_WIO_RAP);
}
static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
{
outw(val, addr + PCNET32_WIO_RAP);
}
/* Reading the reset port triggers a chip reset (value is discarded). */
static void pcnet32_wio_reset(unsigned long addr)
{
inw(addr + PCNET32_WIO_RESET);
}
/* Probe helper: if a value written to RAP reads back, the chip is in
 * (or supports) word-I/O mode at this address. */
static int pcnet32_wio_check(unsigned long addr)
{
outw(88, addr + PCNET32_WIO_RAP);
return inw(addr + PCNET32_WIO_RAP) == 88;
}
static const struct pcnet32_access pcnet32_wio = {
.read_csr = pcnet32_wio_read_csr,
.write_csr = pcnet32_wio_write_csr,
.read_bcr = pcnet32_wio_read_bcr,
.write_bcr = pcnet32_wio_write_bcr,
.read_rap = pcnet32_wio_read_rap,
.write_rap = pcnet32_wio_write_rap,
.reset = pcnet32_wio_reset
};
/* 32-bit "dword I/O" register accessors, mirror of the WIO set above.
 * Registers are still 16 bits wide, hence the & 0xffff on reads. */
static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
{
outl(index, addr + PCNET32_DWIO_RAP);
return inl(addr + PCNET32_DWIO_RDP) & 0xffff;
}
static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
{
outl(index, addr + PCNET32_DWIO_RAP);
outl(val, addr + PCNET32_DWIO_RDP);
}
static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
{
outl(index, addr + PCNET32_DWIO_RAP);
return inl(addr + PCNET32_DWIO_BDP) & 0xffff;
}
static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
{
outl(index, addr + PCNET32_DWIO_RAP);
outl(val, addr + PCNET32_DWIO_BDP);
}
static u16 pcnet32_dwio_read_rap(unsigned long addr)
{
return inl(addr + PCNET32_DWIO_RAP) & 0xffff;
}
static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
{
outl(val, addr + PCNET32_DWIO_RAP);
}
/* Reading the reset port triggers a chip reset (value is discarded). */
static void pcnet32_dwio_reset(unsigned long addr)
{
inl(addr + PCNET32_DWIO_RESET);
}
/* Probe helper: detects dword-I/O capability by RAP write/read-back. */
static int pcnet32_dwio_check(unsigned long addr)
{
outl(88, addr + PCNET32_DWIO_RAP);
return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88;
}
static const struct pcnet32_access pcnet32_dwio = {
.read_csr = pcnet32_dwio_read_csr,
.write_csr = pcnet32_dwio_write_csr,
.read_bcr = pcnet32_dwio_read_bcr,
.write_bcr = pcnet32_dwio_write_bcr,
.read_rap = pcnet32_dwio_read_rap,
.write_rap = pcnet32_dwio_write_rap,
.reset = pcnet32_dwio_reset
};
/* Quiesce the interface for a reconfiguration: stop NAPI polling and
 * the transmit queue.  trans_start is refreshed first so the netdev
 * watchdog does not fire while the queue is deliberately stopped. */
static void pcnet32_netif_stop(struct net_device *dev)
{
struct pcnet32_private *lp = netdev_priv(dev);
dev->trans_start = jiffies; /* prevent tx timeout */
napi_disable(&lp->napi);
netif_tx_disable(dev);
}
/* Counterpart of pcnet32_netif_stop: restart the tx queue and NAPI.
 * The upper byte of CSR3 is cleared before re-enabling; presumably
 * these are the interrupt-mask bits being un-masked -- confirm against
 * the Am79C97x datasheet. */
static void pcnet32_netif_start(struct net_device *dev)
{
struct pcnet32_private *lp = netdev_priv(dev);
ulong ioaddr = dev->base_addr;
u16 val;
netif_wake_queue(dev);
val = lp->a->read_csr(ioaddr, CSR3);
val &= 0x00ff;
lp->a->write_csr(ioaddr, CSR3, val);
napi_enable(&lp->napi);
}
/*
* Allocate space for the new sized tx ring.
* Free old resources
* Save new resources.
* Any failure keeps old resources.
* Must be called with lp->lock held.
*/
static void pcnet32_realloc_tx_ring(struct net_device *dev,
struct pcnet32_private *lp,
unsigned int size)
{
/* "size" is log2 of the requested ring entry count (1 << size). */
dma_addr_t new_ring_dma_addr;
dma_addr_t *new_dma_addr_list;
struct pcnet32_tx_head *new_tx_ring;
struct sk_buff **new_skb_list;
/* Drop any in-flight tx buffers: the old ring is going away. */
pcnet32_purge_tx_ring(dev);
new_tx_ring = pci_alloc_consistent(lp->pci_dev,
sizeof(struct pcnet32_tx_head) *
(1 << size),
&new_ring_dma_addr);
if (new_tx_ring == NULL) {
netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
return;
}
memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
/* GFP_ATOMIC: called under lp->lock with interrupts off. */
new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t),
GFP_ATOMIC);
if (!new_dma_addr_list)
goto free_new_tx_ring;
new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
GFP_ATOMIC);
if (!new_skb_list)
goto free_new_lists;
/* All allocations succeeded: release the old resources and commit
* the new ones in one go (any earlier failure kept the old ring). */
kfree(lp->tx_skbuff);
kfree(lp->tx_dma_addr);
pci_free_consistent(lp->pci_dev,
sizeof(struct pcnet32_tx_head) *
lp->tx_ring_size, lp->tx_ring,
lp->tx_ring_dma_addr);
lp->tx_ring_size = (1 << size);
lp->tx_mod_mask = lp->tx_ring_size - 1;
/* log2 count pre-shifted for the init block's tlen_rlen field --
* TODO confirm field position against the databook. */
lp->tx_len_bits = (size << 12);
lp->tx_ring = new_tx_ring;
lp->tx_ring_dma_addr = new_ring_dma_addr;
lp->tx_dma_addr = new_dma_addr_list;
lp->tx_skbuff = new_skb_list;
return;
free_new_lists:
kfree(new_dma_addr_list);
free_new_tx_ring:
pci_free_consistent(lp->pci_dev,
sizeof(struct pcnet32_tx_head) *
(1 << size),
new_tx_ring,
new_ring_dma_addr);
}
/*
* Allocate space for the new sized rx ring.
* Re-use old receive buffers.
* alloc extra buffers
* free unneeded buffers
* free unneeded buffers
* Save new resources.
* Any failure keeps old resources.
* Must be called with lp->lock held.
*/
static void pcnet32_realloc_rx_ring(struct net_device *dev,
				    struct pcnet32_private *lp,
				    unsigned int size)
{
	dma_addr_t new_ring_dma_addr;
	dma_addr_t *new_dma_addr_list;
	struct pcnet32_rx_head *new_rx_ring;
	struct sk_buff **new_skb_list;
	int new;
	/* "size" is log2 of the entry count.  The original code compared
	 * and iterated with "size" itself where the element count was
	 * meant, so growing/shrinking the ring copied/allocated/freed the
	 * wrong number of entries.  Work in "entries" throughout. */
	unsigned int entries = 1 << size;
	int overlap;

	new_rx_ring = pci_alloc_consistent(lp->pci_dev,
					   sizeof(struct pcnet32_rx_head) *
					   entries,
					   &new_ring_dma_addr);
	if (new_rx_ring == NULL) {
		netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
		return;
	}
	memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * entries);

	/* GFP_ATOMIC: called under lp->lock with interrupts off. */
	new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
	if (!new_dma_addr_list)
		goto free_new_rx_ring;
	new_skb_list = kcalloc(entries, sizeof(struct sk_buff *),
			       GFP_ATOMIC);
	if (!new_skb_list)
		goto free_new_lists;

	/* first copy the current receive buffers */
	overlap = min(entries, lp->rx_ring_size);
	for (new = 0; new < overlap; new++) {
		new_rx_ring[new] = lp->rx_ring[new];
		new_dma_addr_list[new] = lp->rx_dma_addr[new];
		new_skb_list[new] = lp->rx_skbuff[new];
	}
	/* now allocate any new buffers needed */
	for (; new < entries; new++) {
		struct sk_buff *rx_skbuff;
		new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB);
		rx_skbuff = new_skb_list[new];
		if (!rx_skbuff) {
			/* keep the original lists and buffers */
			netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n",
				  __func__);
			goto free_all_new;
		}
		skb_reserve(rx_skbuff, NET_IP_ALIGN);
		new_dma_addr_list[new] =
			pci_map_single(lp->pci_dev, rx_skbuff->data,
				       PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
		new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
		new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
		/* hand descriptor ownership to the chip */
		new_rx_ring[new].status = cpu_to_le16(0x8000);
	}
	/* and free any unneeded buffers (when the ring shrinks) */
	for (; new < lp->rx_ring_size; new++) {
		if (lp->rx_skbuff[new]) {
			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
					 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(lp->rx_skbuff[new]);
		}
	}

	kfree(lp->rx_skbuff);
	kfree(lp->rx_dma_addr);
	pci_free_consistent(lp->pci_dev,
			    sizeof(struct pcnet32_rx_head) *
			    lp->rx_ring_size, lp->rx_ring,
			    lp->rx_ring_dma_addr);

	lp->rx_ring_size = entries;
	lp->rx_mod_mask = lp->rx_ring_size - 1;
	lp->rx_len_bits = (size << 4);
	lp->rx_ring = new_rx_ring;
	lp->rx_ring_dma_addr = new_ring_dma_addr;
	lp->rx_dma_addr = new_dma_addr_list;
	lp->rx_skbuff = new_skb_list;
	return;

free_all_new:
	/* Undo only the buffers allocated above; indices below "overlap"
	 * still belong to the (kept) old ring. */
	while (--new >= overlap) {
		if (new_skb_list[new]) {
			pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
					 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(new_skb_list[new]);
		}
	}
	kfree(new_skb_list);
free_new_lists:
	kfree(new_dma_addr_list);
free_new_rx_ring:
	pci_free_consistent(lp->pci_dev,
			    sizeof(struct pcnet32_rx_head) *
			    entries,
			    new_rx_ring,
			    new_ring_dma_addr);
}
/* Release every receive buffer: reclaim descriptor ownership from the
 * chip, unmap the DMA buffers, and free the skbs. */
static void pcnet32_purge_rx_ring(struct net_device *dev)
{
struct pcnet32_private *lp = netdev_priv(dev);
int i;
/* free all allocated skbuffs */
for (i = 0; i < lp->rx_ring_size; i++) {
lp->rx_ring[i].status = 0; /* CPU owns buffer */
wmb(); /* Make sure adapter sees owner change */
if (lp->rx_skbuff[i]) {
pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(lp->rx_skbuff[i]);
}
lp->rx_skbuff[i] = NULL;
lp->rx_dma_addr[i] = 0;
}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt handler synchronously with the
 * device IRQ masked (used by netconsole/KGDB-over-ethernet). */
static void pcnet32_poll_controller(struct net_device *dev)
{
disable_irq(dev->irq);
pcnet32_interrupt(0, dev);
enable_irq(dev->irq);
}
#endif
/* ethtool get_settings: report link parameters from the MII PHY.
 * Only meaningful when an MII port is present. */
static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long flags;

	if (!lp->mii)
		return -EOPNOTSUPP;

	spin_lock_irqsave(&lp->lock, flags);
	mii_ethtool_gset(&lp->mii_if, cmd);
	spin_unlock_irqrestore(&lp->lock, flags);
	return 0;
}
/* ethtool set_settings: push link parameters down to the MII PHY.
 * Returns -EOPNOTSUPP when there is no MII port to configure. */
static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long flags;
	int rc;

	if (!lp->mii)
		return -EOPNOTSUPP;

	spin_lock_irqsave(&lp->lock, flags);
	rc = mii_ethtool_sset(&lp->mii_if, cmd);
	spin_unlock_irqrestore(&lp->lock, flags);
	return rc;
}
/* ethtool get_drvinfo: driver name/version plus a bus identifier --
 * the PCI slot name for PCI cards, or the I/O base for VLB cards. */
static void pcnet32_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct pcnet32_private *lp = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));

	if (!lp->pci_dev) {
		/* VLB card: no PCI name, report the port instead */
		snprintf(info->bus_info, sizeof(info->bus_info),
			 "VLB 0x%lx", dev->base_addr);
		return;
	}
	strlcpy(info->bus_info, pci_name(lp->pci_dev),
		sizeof(info->bus_info));
}
/* ethtool get_link: 1 = link up.  Uses the MII PHY when present,
 * falls back to BCR4 sampling on 79C970A-or-newer chips, and simply
 * claims "up" on chips too old to report link state. */
static u32 pcnet32_get_link(struct net_device *dev)
{
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long flags;
int r;
spin_lock_irqsave(&lp->lock, flags);
if (lp->mii) {
r = mii_link_ok(&lp->mii_if);
} else if (lp->chip_version >= PCNET32_79C970A) {
ulong ioaddr = dev->base_addr; /* card base I/O address */
/* 0xc0 is the value BCR4 holds with no link -- TODO confirm
* against the Am79C97x databook. */
r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
} else { /* can not detect link on really old chips */
r = 1;
}
spin_unlock_irqrestore(&lp->lock, flags);
return r;
}
/* ethtool msglevel accessors for the NETIF_MSG_* debug bitmask. */
static u32 pcnet32_get_msglevel(struct net_device *dev)
{
struct pcnet32_private *lp = netdev_priv(dev);
return lp->msg_enable;
}
static void pcnet32_set_msglevel(struct net_device *dev, u32 value)
{
struct pcnet32_private *lp = netdev_priv(dev);
lp->msg_enable = value;
}
/* ethtool nway_reset: restart PHY autonegotiation.  Only possible
 * when an MII PHY is attached. */
static int pcnet32_nway_reset(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long flags;
	int rc;

	if (!lp->mii)
		return -EOPNOTSUPP;

	spin_lock_irqsave(&lp->lock, flags);
	rc = mii_nway_restart(&lp->mii_if);
	spin_unlock_irqrestore(&lp->lock, flags);
	return rc;
}
/* ethtool -g: report the current and maximum rx/tx ring depths. */
static void pcnet32_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ering)
{
	struct pcnet32_private *lp = netdev_priv(dev);

	ering->rx_max_pending = RX_MAX_RING_SIZE;
	ering->rx_pending = lp->rx_ring_size;
	ering->tx_max_pending = TX_MAX_RING_SIZE;
	ering->tx_pending = lp->tx_ring_size;
}
/* ethtool -G: resize the rx/tx rings.  Requested sizes are rounded up
 * to the next power of two (min 4, max *_MAX_RING_SIZE); the device is
 * stopped around the reallocation and restarted if it was running. */
static int pcnet32_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *ering)
{
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long flags;
unsigned int size;
ulong ioaddr = dev->base_addr;
int i;
if (ering->rx_mini_pending || ering->rx_jumbo_pending)
return -EINVAL;
if (netif_running(dev))
pcnet32_netif_stop(dev);
spin_lock_irqsave(&lp->lock, flags);
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
/* set the minimum ring size to 4, to allow the loopback test to work
* unchanged.
*/
for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
if (size <= (1 << i))
break;
}
/* i is now log2 of the new size; realloc only on actual change */
if ((1 << i) != lp->tx_ring_size)
pcnet32_realloc_tx_ring(dev, lp, i);
size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
if (size <= (1 << i))
break;
}
if ((1 << i) != lp->rx_ring_size)
pcnet32_realloc_rx_ring(dev, lp, i);
/* scale the NAPI budget with the new rx ring depth */
lp->napi.weight = lp->rx_ring_size / 2;
if (netif_running(dev)) {
pcnet32_netif_start(dev);
pcnet32_restart(dev, CSR0_NORMAL);
}
spin_unlock_irqrestore(&lp->lock, flags);
netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
lp->rx_ring_size, lp->tx_ring_size);
return 0;
}
/* ethtool self-test string table; only the ETH_SS_TEST set exists. */
static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
u8 *data)
{
memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
}
/* Number of strings in the requested ethtool string set. */
static int pcnet32_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_TEST:
return PCNET32_TEST_LEN;
default:
return -EOPNOTSUPP;
}
}
/* ethtool -t entry point: the only supported self-test is the offline
 * internal loopback test; results are reported via test->flags/data. */
static void pcnet32_ethtool_test(struct net_device *dev,
struct ethtool_test *test, u64 * data)
{
struct pcnet32_private *lp = netdev_priv(dev);
int rc;
if (test->flags == ETH_TEST_FL_OFFLINE) {
rc = pcnet32_loopback_test(dev, data);
if (rc) {
netif_printk(lp, hw, KERN_DEBUG, dev,
"Loopback test failed\n");
test->flags |= ETH_TEST_FL_FAILED;
} else
netif_printk(lp, hw, KERN_DEBUG, dev,
"Loopback test passed\n");
} else
netif_printk(lp, hw, KERN_DEBUG, dev,
"No tests to run (specify 'Offline' on ethtool)\n");
} /* end pcnet32_ethtool_test */
/* Internal loopback self-test: puts the chip in loopback mode, sends a
 * handful of crafted frames, and verifies each is received byte-for-byte.
 * Returns 0 on pass, 1 on fail; *data1 receives the same result. */
static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
{
struct pcnet32_private *lp = netdev_priv(dev);
const struct pcnet32_access *a = lp->a; /* access to registers */
ulong ioaddr = dev->base_addr; /* card base I/O address */
struct sk_buff *skb; /* sk buff */
int x, i; /* counters */
int numbuffs = 4; /* number of TX/RX buffers and descs */
u16 status = 0x8300; /* TX ring status */
__le16 teststatus; /* test of ring status */
int rc; /* return code */
int size; /* size of packets */
unsigned char *packet; /* source packet data */
static const int data_len = 60; /* length of source packets */
unsigned long flags;
unsigned long ticks;
rc = 1; /* default to fail */
if (netif_running(dev))
pcnet32_netif_stop(dev);
spin_lock_irqsave(&lp->lock, flags);
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
/* never use more descriptors than the smaller of the two rings */
numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
/* Reset the PCNET32 */
lp->a->reset(ioaddr);
lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
/* switch pcnet32 to 32bit mode */
lp->a->write_bcr(ioaddr, 20, 2);
/* purge & init rings but don't actually restart */
pcnet32_restart(dev, 0x0000);
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
/* Initialize Transmit buffers. */
size = data_len + 15;
for (x = 0; x < numbuffs; x++) {
skb = netdev_alloc_skb(dev, size);
if (!skb) {
netif_printk(lp, hw, KERN_DEBUG, dev,
"Cannot allocate skb at line: %d!\n",
__LINE__);
goto clean_up;
}
packet = skb->data;
skb_put(skb, size); /* create space for data */
lp->tx_skbuff[x] = skb;
lp->tx_ring[x].length = cpu_to_le16(-skb->len);
lp->tx_ring[x].misc = 0;
/* put DA and SA into the skb */
for (i = 0; i < 6; i++)
*packet++ = dev->dev_addr[i];
for (i = 0; i < 6; i++)
*packet++ = dev->dev_addr[i];
/* type */
*packet++ = 0x08;
*packet++ = 0x06;
/* packet number */
*packet++ = x;
/* fill packet with data */
for (i = 0; i < data_len; i++)
*packet++ = i;
lp->tx_dma_addr[x] =
pci_map_single(lp->pci_dev, skb->data, skb->len,
PCI_DMA_TODEVICE);
lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
wmb(); /* Make sure owner changes after all others are visible */
lp->tx_ring[x].status = cpu_to_le16(status);
}
x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */
a->write_bcr(ioaddr, 32, x | 0x0002);
/* set int loopback in CSR15 */
x = a->read_csr(ioaddr, CSR15) & 0xfffc;
lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
teststatus = cpu_to_le16(0x8000);
lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
/* Check status of descriptors: wait (up to ~200ms each) for the chip
* to hand every rx descriptor back to the CPU. */
for (x = 0; x < numbuffs; x++) {
ticks = 0;
rmb();
while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
spin_unlock_irqrestore(&lp->lock, flags);
msleep(1);
spin_lock_irqsave(&lp->lock, flags);
rmb();
ticks++;
}
if (ticks == 200) {
netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x);
break;
}
}
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
wmb();
if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
for (x = 0; x < numbuffs; x++) {
netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x);
skb = lp->rx_skbuff[x];
for (i = 0; i < size; i++)
pr_cont(" %02x", *(skb->data + i));
pr_cont("\n");
}
}
/* compare every received frame against what was transmitted */
x = 0;
rc = 0;
while (x < numbuffs && !rc) {
skb = lp->rx_skbuff[x];
packet = lp->tx_skbuff[x]->data;
for (i = 0; i < size; i++) {
if (*(skb->data + i) != packet[i]) {
netif_printk(lp, hw, KERN_DEBUG, dev,
"Error in compare! %2x - %02x %02x\n",
i, *(skb->data + i), packet[i]);
rc = 1;
break;
}
}
x++;
}
clean_up:
*data1 = rc;
pcnet32_purge_tx_ring(dev);
/* take the chip back out of loopback mode */
x = a->read_csr(ioaddr, CSR15);
a->write_csr(ioaddr, CSR15, (x & ~0x0044)); /* reset bits 6 and 2 */
x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
a->write_bcr(ioaddr, 32, (x & ~0x0002));
if (netif_running(dev)) {
pcnet32_netif_start(dev);
pcnet32_restart(dev, CSR0_NORMAL);
} else {
pcnet32_purge_rx_ring(dev);
lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
}
spin_unlock_irqrestore(&lp->lock, flags);
return rc;
} /* end pcnet32_loopback_test */
/* ethtool -p (identify): blink the adapter LEDs by toggling bit 0x4000
 * in BCR4..BCR7.  The original BCR values are saved on ACTIVE and
 * restored on INACTIVE. */
static int pcnet32_set_phys_id(struct net_device *dev,
enum ethtool_phys_id_state state)
{
struct pcnet32_private *lp = netdev_priv(dev);
const struct pcnet32_access *a = lp->a;
ulong ioaddr = dev->base_addr;
unsigned long flags;
int i;
switch (state) {
case ETHTOOL_ID_ACTIVE:
/* Save the current value of the bcrs */
spin_lock_irqsave(&lp->lock, flags);
for (i = 4; i < 8; i++)
lp->save_regs[i - 4] = a->read_bcr(ioaddr, i);
spin_unlock_irqrestore(&lp->lock, flags);
return 2; /* cycle on/off twice per second */
case ETHTOOL_ID_ON:
case ETHTOOL_ID_OFF:
/* Blink the led */
spin_lock_irqsave(&lp->lock, flags);
for (i = 4; i < 8; i++)
a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
spin_unlock_irqrestore(&lp->lock, flags);
break;
case ETHTOOL_ID_INACTIVE:
/* Restore the original value of the bcrs */
spin_lock_irqsave(&lp->lock, flags);
for (i = 4; i < 8; i++)
a->write_bcr(ioaddr, i, lp->save_regs[i - 4]);
spin_unlock_irqrestore(&lp->lock, flags);
}
return 0;
}
/*
* lp->lock must be held.
*/
/* Put the chip into SUSPEND via CSR5 and poll (up to ~200ms) until the
 * chip acknowledges.  Returns 1 when suspended, 0 otherwise (including
 * pre-79C970A chips that lack SUSPEND).  The lock is dropped around
 * each delay; *flags is updated accordingly. */
static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
int can_sleep)
{
int csr5;
struct pcnet32_private *lp = netdev_priv(dev);
const struct pcnet32_access *a = lp->a;
ulong ioaddr = dev->base_addr;
int ticks;
/* really old chips have to be stopped. */
if (lp->chip_version < PCNET32_79C970A)
return 0;
/* set SUSPEND (SPND) - CSR5 bit 0 */
csr5 = a->read_csr(ioaddr, CSR5);
a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
/* poll waiting for bit to be set */
ticks = 0;
while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
spin_unlock_irqrestore(&lp->lock, *flags);
if (can_sleep)
msleep(1);
else
mdelay(1);
spin_lock_irqsave(&lp->lock, *flags);
ticks++;
if (ticks > 200) {
netif_printk(lp, hw, KERN_DEBUG, dev,
"Error getting into suspend!\n");
return 0;
}
}
return 1;
}
/*
* process one receive descriptor entry
*/
static void pcnet32_rx_entry(struct net_device *dev,
struct pcnet32_private *lp,
struct pcnet32_rx_head *rxp,
int entry)
{
/* upper status byte: 0x03 == STP|ENP, a complete error-free frame */
int status = (short)le16_to_cpu(rxp->status) >> 8;
int rx_in_place = 0;
struct sk_buff *skb;
short pkt_len;
if (status != 0x03) { /* There was an error. */
/*
* There is a tricky error noted by John Murphy,
* <murf@perftech.com> to Russ Nelson: Even with full-sized
* buffers it's possible for a jabber packet to use two
* buffers, with only the last correctly noting the error.
*/
if (status & 0x01) /* Only count a general error at the */
dev->stats.rx_errors++; /* end of a packet. */
if (status & 0x20)
dev->stats.rx_frame_errors++;
if (status & 0x10)
dev->stats.rx_over_errors++;
if (status & 0x08)
dev->stats.rx_crc_errors++;
if (status & 0x04)
dev->stats.rx_fifo_errors++;
return;
}
/* strip the 4-byte FCS from the reported length */
pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4;
/* Discard oversize frames. */
if (unlikely(pkt_len > PKT_BUF_SIZE)) {
netif_err(lp, drv, dev, "Impossible packet size %d!\n",
pkt_len);
dev->stats.rx_errors++;
return;
}
if (pkt_len < 60) {
netif_err(lp, rx_err, dev, "Runt packet!\n");
dev->stats.rx_errors++;
return;
}
/* Large frame: hand the mapped buffer straight up and give the
* descriptor a freshly allocated replacement skb. */
if (pkt_len > rx_copybreak) {
struct sk_buff *newskb;
newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
if (newskb) {
skb_reserve(newskb, NET_IP_ALIGN);
skb = lp->rx_skbuff[entry];
pci_unmap_single(lp->pci_dev,
lp->rx_dma_addr[entry],
PKT_BUF_SIZE,
PCI_DMA_FROMDEVICE);
skb_put(skb, pkt_len);
lp->rx_skbuff[entry] = newskb;
lp->rx_dma_addr[entry] =
pci_map_single(lp->pci_dev,
newskb->data,
PKT_BUF_SIZE,
PCI_DMA_FROMDEVICE);
rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
rx_in_place = 1;
} else
skb = NULL;
} else
/* Small frame: copy into a right-sized skb, keep ring buffer */
skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
if (skb == NULL) {
dev->stats.rx_dropped++;
return;
}
if (!rx_in_place) {
skb_reserve(skb, NET_IP_ALIGN);
skb_put(skb, pkt_len); /* Make room */
/* sync the DMA buffer for CPU access around the copy */
pci_dma_sync_single_for_cpu(lp->pci_dev,
lp->rx_dma_addr[entry],
pkt_len,
PCI_DMA_FROMDEVICE);
skb_copy_to_linear_data(skb,
(unsigned char *)(lp->rx_skbuff[entry]->data),
pkt_len);
pci_dma_sync_single_for_device(lp->pci_dev,
lp->rx_dma_addr[entry],
pkt_len,
PCI_DMA_FROMDEVICE);
}
dev->stats.rx_bytes += skb->len;
skb->protocol = eth_type_trans(skb, dev);
netif_receive_skb(skb);
dev->stats.rx_packets++;
}
/*
 * Receive up to @budget packets from the rx ring (NAPI poll body).
 * Returns the number of descriptors processed.
 */
static int pcnet32_rx(struct net_device *dev, int budget)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int entry = lp->cur_rx & lp->rx_mod_mask;
	struct pcnet32_rx_head *rxp = &lp->rx_ring[entry];
	int npackets = 0;

	/* If we own the next entry, it's a new packet. Send it up. */
	while (npackets < budget && (short)le16_to_cpu(rxp->status) >= 0) {
		pcnet32_rx_entry(dev, lp, rxp, entry);
		npackets += 1;
		/*
		 * The docs say that the buffer length isn't touched, but Andrew
		 * Boyd of QNX reports that some revs of the 79C965 clear it.
		 */
		rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE);
		wmb();	/* Make sure owner changes after others are visible */
		/* hand the descriptor back to the chip (OWN bit, 0x8000) */
		rxp->status = cpu_to_le16(0x8000);
		entry = (++lp->cur_rx) & lp->rx_mod_mask;
		rxp = &lp->rx_ring[entry];
	}

	return npackets;
}
/*
 * Reap completed transmit descriptors: update statistics, unmap and
 * free the transmitted skbs, and wake the queue when the ring drains.
 * Returns nonzero if a Tx FIFO error requires a chip restart.
 * Caller must hold lp->lock.
 */
static int pcnet32_tx(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned int dirty_tx = lp->dirty_tx;
	int delta;
	int must_restart = 0;

	while (dirty_tx != lp->cur_tx) {
		int entry = dirty_tx & lp->tx_mod_mask;
		int status = (short)le16_to_cpu(lp->tx_ring[entry].status);

		if (status < 0)
			break;	/* It still hasn't been Txed */

		lp->tx_ring[entry].base = 0;

		if (status & 0x4000) {
			/* There was a major error, log it. */
			int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
			dev->stats.tx_errors++;
			netif_err(lp, tx_err, dev,
				  "Tx error status=%04x err_status=%08x\n",
				  status, err_status);
			if (err_status & 0x04000000)
				dev->stats.tx_aborted_errors++;
			if (err_status & 0x08000000)
				dev->stats.tx_carrier_errors++;
			if (err_status & 0x10000000)
				dev->stats.tx_window_errors++;
#ifndef DO_DXSUFLO
			if (err_status & 0x40000000) {
				dev->stats.tx_fifo_errors++;
				/* Ackk! On FIFO errors the Tx unit is turned off! */
				/* Remove this verbosity later! */
				netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
				must_restart = 1;
			}
#else
			if (err_status & 0x40000000) {
				dev->stats.tx_fifo_errors++;
				if (!lp->dxsuflo) {	/* If controller doesn't recover ... */
					/* Ackk! On FIFO errors the Tx unit is turned off! */
					/* Remove this verbosity later! */
					netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
					must_restart = 1;
				}
			}
#endif
		} else {
			if (status & 0x1800)
				dev->stats.collisions++;
			dev->stats.tx_packets++;
		}

		/* We must free the original skb */
		if (lp->tx_skbuff[entry]) {
			pci_unmap_single(lp->pci_dev,
					 lp->tx_dma_addr[entry],
					 lp->tx_skbuff[entry]->
					 len, PCI_DMA_TODEVICE);
			dev_kfree_skb_any(lp->tx_skbuff[entry]);
			lp->tx_skbuff[entry] = NULL;
			lp->tx_dma_addr[entry] = 0;
		}
		dirty_tx++;
	}

	/* sanity check: in-flight count, computed modulo twice the ring
	 * size so a full ring is distinguishable from an empty one */
	delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
	if (delta > lp->tx_ring_size) {
		netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
			  dirty_tx, lp->cur_tx, lp->tx_full);
		dirty_tx += lp->tx_ring_size;
		delta -= lp->tx_ring_size;
	}

	if (lp->tx_full &&
	    netif_queue_stopped(dev) &&
	    delta < lp->tx_ring_size - 2) {
		/* The ring is no longer full, clear tbusy. */
		lp->tx_full = 0;
		netif_wake_queue(dev);
	}
	lp->dirty_tx = dirty_tx;

	return must_restart;
}
/*
 * NAPI poll handler: receive up to @budget packets, reap the Tx ring
 * (restarting the chip after a fatal Tx FIFO error), and when all work
 * is done leave polling mode and re-enable the chip's interrupts.
 */
static int pcnet32_poll(struct napi_struct *napi, int budget)
{
	struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi);
	struct net_device *dev = lp->dev;
	unsigned long ioaddr = dev->base_addr;
	unsigned long flags;
	int work_done;
	u16 val;

	work_done = pcnet32_rx(dev, budget);

	spin_lock_irqsave(&lp->lock, flags);
	if (pcnet32_tx(dev)) {
		/* reset the chip to clear the error condition, then restart */
		lp->a->reset(ioaddr);
		lp->a->write_csr(ioaddr, CSR4, 0x0915);	/* auto tx pad */
		pcnet32_restart(dev, CSR0_START);
		netif_wake_queue(dev);
	}
	spin_unlock_irqrestore(&lp->lock, flags);

	if (work_done < budget) {
		spin_lock_irqsave(&lp->lock, flags);
		/* budget not exhausted: exit polling mode */
		__napi_complete(napi);

		/* clear interrupt masks */
		val = lp->a->read_csr(ioaddr, CSR3);
		val &= 0x00ff;
		lp->a->write_csr(ioaddr, CSR3, val);

		/* Set interrupt enable. */
		lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);

		spin_unlock_irqrestore(&lp->lock, flags);
	}
	return work_done;
}
#define PCNET32_REGS_PER_PHY 32
#define PCNET32_MAX_PHYS 32
/*
 * ethtool get_regs_len: size in bytes of the register dump produced by
 * pcnet32_get_regs() -- the fixed register set plus one bank of
 * registers per discovered PHY.
 */
static int pcnet32_get_regs_len(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int phy_regs = lp->phycount * PCNET32_REGS_PER_PHY;

	return (PCNET32_NUM_REGS + phy_regs) * sizeof(u16);
}
/*
 * ethtool get_regs: dump the address PROM, CSRs, BCRs and (if MII is
 * present) the per-PHY registers into @ptr as an array of u16.  The
 * sequence below defines the buffer layout and must stay in sync with
 * pcnet32_get_regs_len().  A running chip is suspended for the dump
 * and resumed afterwards.
 */
static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *ptr)
{
	int i, csr0;
	u16 *buff = ptr;
	struct pcnet32_private *lp = netdev_priv(dev);
	const struct pcnet32_access *a = lp->a;
	ulong ioaddr = dev->base_addr;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	csr0 = a->read_csr(ioaddr, CSR0);
	if (!(csr0 & CSR0_STOP))	/* If not stopped */
		pcnet32_suspend(dev, &flags, 1);

	/* read address PROM */
	for (i = 0; i < 16; i += 2)
		*buff++ = inw(ioaddr + i);

	/* read control and status registers */
	for (i = 0; i < 90; i++)
		*buff++ = a->read_csr(ioaddr, i);

	*buff++ = a->read_csr(ioaddr, 112);
	*buff++ = a->read_csr(ioaddr, 114);

	/* read bus configuration registers */
	for (i = 0; i < 30; i++)
		*buff++ = a->read_bcr(ioaddr, i);

	*buff++ = 0;	/* skip bcr30 so as not to hang 79C976 */

	for (i = 31; i < 36; i++)
		*buff++ = a->read_bcr(ioaddr, i);

	/* read mii phy registers */
	if (lp->mii) {
		int j;
		for (j = 0; j < PCNET32_MAX_PHYS; j++) {
			if (lp->phymask & (1 << j)) {
				for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
					/* BCR33 selects (phy, reg); BCR34
					 * returns the selected register */
					lp->a->write_bcr(ioaddr, 33,
							 (j << 5) | i);
					*buff++ = lp->a->read_bcr(ioaddr, 34);
				}
			}
		}
	}

	if (!(csr0 & CSR0_STOP)) {	/* If not stopped */
		int csr5;

		/* clear SUSPEND (SPND) - CSR5 bit 0 */
		csr5 = a->read_csr(ioaddr, CSR5);
		a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
	}

	spin_unlock_irqrestore(&lp->lock, flags);
}
/* ethtool operations table; handlers are defined elsewhere in this file */
static const struct ethtool_ops pcnet32_ethtool_ops = {
	.get_settings = pcnet32_get_settings,
	.set_settings = pcnet32_set_settings,
	.get_drvinfo = pcnet32_get_drvinfo,
	.get_msglevel = pcnet32_get_msglevel,
	.set_msglevel = pcnet32_set_msglevel,
	.nway_reset = pcnet32_nway_reset,
	.get_link = pcnet32_get_link,
	.get_ringparam = pcnet32_get_ringparam,
	.set_ringparam = pcnet32_set_ringparam,
	.get_strings = pcnet32_get_strings,
	.self_test = pcnet32_ethtool_test,
	.set_phys_id = pcnet32_set_phys_id,
	.get_regs_len = pcnet32_get_regs_len,
	.get_regs = pcnet32_get_regs,
	.get_sset_count = pcnet32_get_sset_count,
};
/* only probes for non-PCI devices, the rest are handled by
* pci_register_driver via pcnet32_probe_pci */
/*
 * Scan the zero-terminated list of known VLB base addresses for PCnet
 * chips and probe each one found.  The I/O region is kept on success
 * (pcnet32_probe1 takes ownership) and released otherwise.
 */
static void pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
{
	unsigned int *port, ioaddr;

	for (port = pcnet32_portlist; (ioaddr = *port); port++) {
		if (!request_region(ioaddr, PCNET32_TOTAL_SIZE,
				    "pcnet32_probe_vlbus"))
			continue;
		/* a real PCnet chip has 0x57 0x57 at PROM offsets 14/15 */
		if (inb(ioaddr + 14) == 0x57 && inb(ioaddr + 15) == 0x57)
			pcnet32_probe1(ioaddr, 0, NULL);
		else
			release_region(ioaddr, PCNET32_TOTAL_SIZE);
	}
}
/*
 * PCI probe entry point: enable the device, validate its I/O resources
 * and DMA capability, reserve the I/O region, then hand off to
 * pcnet32_probe1().  Returns 0 on success or a negative errno.
 */
static int
pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned long base;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("failed to enable device -- err=%d\n", rc);
		return rc;
	}
	pci_set_master(pdev);

	base = pci_resource_start(pdev, 0);
	if (!base) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("card has no PCI IO resources, aborting\n");
		return -ENODEV;
	}

	if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("architecture does not support 32bit PCI busmaster DMA\n");
		return -ENODEV;
	}

	if (!request_region(base, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("io address range already allocated\n");
		return -EBUSY;
	}

	rc = pcnet32_probe1(base, 1, pdev);
	if (rc < 0)
		pci_disable_device(pdev);

	return rc;
}
/* net_device operations: entry points the networking core invokes */
static const struct net_device_ops pcnet32_netdev_ops = {
	.ndo_open = pcnet32_open,
	.ndo_stop = pcnet32_close,
	.ndo_start_xmit = pcnet32_start_xmit,
	.ndo_tx_timeout = pcnet32_tx_timeout,
	.ndo_get_stats = pcnet32_get_stats,
	.ndo_set_rx_mode = pcnet32_set_multicast_list,
	.ndo_do_ioctl = pcnet32_ioctl,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = pcnet32_poll_controller,
#endif
};
/* pcnet32_probe1
 *  Called from both pcnet32_probe_vlbus and pcnet_probe_pci.
 *  pdev will be NULL when called from pcnet32_probe_vlbus.
 *
 *  Identifies the chip variant, reads the MAC address (CSR12-14 vs.
 *  address PROM), allocates the netdev plus the DMA init block, sets
 *  up rings, IRQ and PHYs, and registers the device.  Returns 0 on
 *  success or a negative errno; the I/O region is released on error.
 */
static int
pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
{
	struct pcnet32_private *lp;
	int i, media;
	int fdx, mii, fset, dxsuflo;
	int chip_version;
	char *chipname;
	struct net_device *dev;
	const struct pcnet32_access *a = NULL;
	u8 promaddr[6];
	int ret = -ENODEV;

	/* reset the chip */
	pcnet32_wio_reset(ioaddr);

	/* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
	if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
		a = &pcnet32_wio;
	} else {
		pcnet32_dwio_reset(ioaddr);
		if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 &&
		    pcnet32_dwio_check(ioaddr)) {
			a = &pcnet32_dwio;
		} else {
			if (pcnet32_debug & NETIF_MSG_PROBE)
				pr_err("No access methods\n");
			goto err_release_region;
		}
	}

	/* chip version: CSR88 holds the low word, CSR89 the high word */
	chip_version =
	    a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
	if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
		pr_info(" PCnet chip version is %#x\n", chip_version);
	if ((chip_version & 0xfff) != 0x003) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_info("Unsupported chip version\n");
		goto err_release_region;
	}

	/* initialize variables */
	fdx = mii = fset = dxsuflo = 0;
	chip_version = (chip_version >> 12) & 0xffff;

	/* per-variant capabilities: full duplex, MII, FIFO tuning */
	switch (chip_version) {
	case 0x2420:
		chipname = "PCnet/PCI 79C970";	/* PCI */
		break;
	case 0x2430:
		if (shared)
			chipname = "PCnet/PCI 79C970";	/* 970 gives the wrong chip id back */
		else
			chipname = "PCnet/32 79C965";	/* 486/VL bus */
		break;
	case 0x2621:
		chipname = "PCnet/PCI II 79C970A";	/* PCI */
		fdx = 1;
		break;
	case 0x2623:
		chipname = "PCnet/FAST 79C971";	/* PCI */
		fdx = 1;
		mii = 1;
		fset = 1;
		break;
	case 0x2624:
		chipname = "PCnet/FAST+ 79C972";	/* PCI */
		fdx = 1;
		mii = 1;
		fset = 1;
		break;
	case 0x2625:
		chipname = "PCnet/FAST III 79C973";	/* PCI */
		fdx = 1;
		mii = 1;
		break;
	case 0x2626:
		chipname = "PCnet/Home 79C978";	/* PCI */
		fdx = 1;
		/*
		 * This is based on specs published at www.amd.com. This section
		 * assumes that a card with a 79C978 wants to go into standard
		 * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode,
		 * and the module option homepna=1 can select this instead.
		 */
		media = a->read_bcr(ioaddr, 49);
		media &= ~3;	/* default to 10Mb ethernet */
		if (cards_found < MAX_UNITS && homepna[cards_found])
			media |= 1;	/* switch to home wiring mode */
		if (pcnet32_debug & NETIF_MSG_PROBE)
			printk(KERN_DEBUG PFX "media set to %sMbit mode\n",
			       (media & 1) ? "1" : "10");
		a->write_bcr(ioaddr, 49, media);
		break;
	case 0x2627:
		chipname = "PCnet/FAST III 79C975";	/* PCI */
		fdx = 1;
		mii = 1;
		break;
	case 0x2628:
		chipname = "PCnet/PRO 79C976";
		fdx = 1;
		mii = 1;
		break;
	default:
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_info("PCnet version %#x, no PCnet32 chip\n",
				chip_version);
		goto err_release_region;
	}

	/*
	 * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
	 * starting until the packet is loaded. Strike one for reliability, lose
	 * one for latency - although on PCI this isn't a big loss. Older chips
	 * have FIFO's smaller than a packet, so you can't do this.
	 * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
	 */
	if (fset) {
		a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
		a->write_csr(ioaddr, 80,
			     (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
		dxsuflo = 1;
	}

	dev = alloc_etherdev(sizeof(*lp));
	if (!dev) {
		ret = -ENOMEM;
		goto err_release_region;
	}

	if (pdev)
		SET_NETDEV_DEV(dev, &pdev->dev);

	if (pcnet32_debug & NETIF_MSG_PROBE)
		pr_info("%s at %#3lx,", chipname, ioaddr);

	/* In most chips, after a chip reset, the ethernet address is read from the
	 * station address PROM at the base address and programmed into the
	 * "Physical Address Registers" CSR12-14.
	 * As a precautionary measure, we read the PROM values and complain if
	 * they disagree with the CSRs. If they miscompare, and the PROM addr
	 * is valid, then the PROM addr is used.
	 */
	for (i = 0; i < 3; i++) {
		unsigned int val;
		val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
		/* There may be endianness issues here. */
		dev->dev_addr[2 * i] = val & 0x0ff;
		dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
	}

	/* read PROM address and compare with CSR address */
	for (i = 0; i < 6; i++)
		promaddr[i] = inb(ioaddr + i);

	if (memcmp(promaddr, dev->dev_addr, 6) ||
	    !is_valid_ether_addr(dev->dev_addr)) {
		if (is_valid_ether_addr(promaddr)) {
			if (pcnet32_debug & NETIF_MSG_PROBE) {
				pr_cont(" warning: CSR address invalid,\n");
				pr_info(" using instead PROM address of");
			}
			memcpy(dev->dev_addr, promaddr, 6);
		}
	}

	/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
	if (!is_valid_ether_addr(dev->dev_addr))
		memset(dev->dev_addr, 0, ETH_ALEN);

	if (pcnet32_debug & NETIF_MSG_PROBE) {
		pr_cont(" %pM", dev->dev_addr);

		/* Version 0x2623 and 0x2624 */
		if (((chip_version + 1) & 0xfffe) == 0x2624) {
			i = a->read_csr(ioaddr, 80) & 0x0C00;	/* Check tx_start_pt */
			pr_info(" tx_start_pt(0x%04x):", i);
			switch (i >> 10) {
			case 0:
				pr_cont(" 20 bytes,");
				break;
			case 1:
				pr_cont(" 64 bytes,");
				break;
			case 2:
				pr_cont(" 128 bytes,");
				break;
			case 3:
				pr_cont("~220 bytes,");
				break;
			}
			i = a->read_bcr(ioaddr, 18);	/* Check Burst/Bus control */
			pr_cont(" BCR18(%x):", i & 0xffff);
			if (i & (1 << 5))
				pr_cont("BurstWrEn ");
			if (i & (1 << 6))
				pr_cont("BurstRdEn ");
			if (i & (1 << 7))
				pr_cont("DWordIO ");
			if (i & (1 << 11))
				pr_cont("NoUFlow ");
			i = a->read_bcr(ioaddr, 25);
			pr_info(" SRAMSIZE=0x%04x,", i << 8);
			i = a->read_bcr(ioaddr, 26);
			pr_cont(" SRAM_BND=0x%04x,", i << 8);
			i = a->read_bcr(ioaddr, 27);
			if (i & (1 << 14))
				pr_cont("LowLatRx");
		}
	}

	dev->base_addr = ioaddr;
	lp = netdev_priv(dev);
	/* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
	lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block),
					      &lp->init_dma_addr);
	if (!lp->init_block) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("Consistent memory allocation failed\n");
		ret = -ENOMEM;
		goto err_free_netdev;
	}
	lp->pci_dev = pdev;

	lp->dev = dev;

	spin_lock_init(&lp->lock);

	lp->name = chipname;
	lp->shared_irq = shared;
	lp->tx_ring_size = TX_RING_SIZE;	/* default tx ring size */
	lp->rx_ring_size = RX_RING_SIZE;	/* default rx ring size */
	lp->tx_mod_mask = lp->tx_ring_size - 1;
	lp->rx_mod_mask = lp->rx_ring_size - 1;
	lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
	lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
	lp->mii_if.full_duplex = fdx;
	lp->mii_if.phy_id_mask = 0x1f;
	lp->mii_if.reg_num_mask = 0x1f;
	lp->dxsuflo = dxsuflo;
	lp->mii = mii;
	lp->chip_version = chip_version;
	lp->msg_enable = pcnet32_debug;
	/* NOTE(review): sizeof(options_mapping) is a byte count, not the
	 * number of table entries -- this bound looks like it should be
	 * ARRAY_SIZE(options_mapping); verify against its declaration. */
	if ((cards_found >= MAX_UNITS) ||
	    (options[cards_found] >= sizeof(options_mapping)))
		lp->options = PCNET32_PORT_ASEL;
	else
		lp->options = options_mapping[options[cards_found]];
	lp->mii_if.dev = dev;
	lp->mii_if.mdio_read = mdio_read;
	lp->mii_if.mdio_write = mdio_write;

	/* napi.weight is used in both the napi and non-napi cases */
	lp->napi.weight = lp->rx_ring_size / 2;

	netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2);

	if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
	    ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
		lp->options |= PCNET32_PORT_FD;

	lp->a = a;

	/* prior to register_netdev, dev->name is not yet correct */
	if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
		ret = -ENOMEM;
		goto err_free_ring;
	}
	/* detect special T1/E1 WAN card by checking for MAC address */
	if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 &&
	    dev->dev_addr[2] == 0x75)
		lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;

	lp->init_block->mode = cpu_to_le16(0x0003);	/* Disable Rx and Tx. */
	lp->init_block->tlen_rlen =
	    cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
	for (i = 0; i < 6; i++)
		lp->init_block->phys_addr[i] = dev->dev_addr[i];
	lp->init_block->filter[0] = 0x00000000;
	lp->init_block->filter[1] = 0x00000000;
	lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
	lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);

	/* switch pcnet32 to 32bit mode */
	a->write_bcr(ioaddr, 20, 2);

	/* point the chip at the init block (CSR1 low word, CSR2 high) */
	a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
	a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));

	if (pdev) {		/* use the IRQ provided by PCI */
		dev->irq = pdev->irq;
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_cont(" assigned IRQ %d\n", dev->irq);
	} else {
		unsigned long irq_mask = probe_irq_on();

		/*
		 * To auto-IRQ we enable the initialization-done and DMA error
		 * interrupts. For ISA boards we get a DMA error, but VLB and PCI
		 * boards will work.
		 */
		/* Trigger an initialization just for the interrupt. */
		a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_INIT);
		mdelay(1);

		dev->irq = probe_irq_off(irq_mask);
		if (!dev->irq) {
			if (pcnet32_debug & NETIF_MSG_PROBE)
				pr_cont(", failed to detect IRQ line\n");
			ret = -ENODEV;
			goto err_free_ring;
		}
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_cont(", probed IRQ %d\n", dev->irq);
	}

	/* Set the mii phy_id so that we can query the link state */
	if (lp->mii) {
		/* lp->phycount and lp->phymask are set to 0 by memset above */
		lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
		/* scan for PHYs */
		for (i = 0; i < PCNET32_MAX_PHYS; i++) {
			unsigned short id1, id2;

			id1 = mdio_read(dev, i, MII_PHYSID1);
			if (id1 == 0xffff)
				continue;
			id2 = mdio_read(dev, i, MII_PHYSID2);
			if (id2 == 0xffff)
				continue;
			if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
				continue;	/* 79C971 & 79C972 have phantom phy at id 31 */
			lp->phycount++;
			lp->phymask |= (1 << i);
			lp->mii_if.phy_id = i;
			if (pcnet32_debug & NETIF_MSG_PROBE)
				pr_info("Found PHY %04x:%04x at address %d\n",
					id1, id2, i);
		}
		lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
		if (lp->phycount > 1)
			lp->options |= PCNET32_PORT_MII;
	}

	init_timer(&lp->watchdog_timer);
	lp->watchdog_timer.data = (unsigned long)dev;
	lp->watchdog_timer.function = (void *)&pcnet32_watchdog;

	/* The PCNET32-specific entries in the device structure. */
	dev->netdev_ops = &pcnet32_netdev_ops;
	dev->ethtool_ops = &pcnet32_ethtool_ops;
	dev->watchdog_timeo = (5 * HZ);

	/* Fill in the generic fields of the device structure. */
	if (register_netdev(dev))
		goto err_free_ring;

	if (pdev) {
		pci_set_drvdata(pdev, dev);
	} else {
		/* VLB devices are kept on a driver-private list */
		lp->next = pcnet32_dev;
		pcnet32_dev = dev;
	}

	if (pcnet32_debug & NETIF_MSG_PROBE)
		pr_info("%s: registered as %s\n", dev->name, lp->name);
	cards_found++;

	/* enable LED writes */
	a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);

	return 0;

err_free_ring:
	pcnet32_free_ring(dev);
	pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
			    lp->init_block, lp->init_dma_addr);
err_free_netdev:
	free_netdev(dev);
err_release_region:
	release_region(ioaddr, PCNET32_TOTAL_SIZE);
	return ret;
}
/* if any allocation fails, caller must also call pcnet32_free_ring */
/*
 * Allocate the Tx/Rx DMA descriptor rings and the per-entry
 * bookkeeping arrays (DMA addresses and skb pointers).
 * Returns 0 on success, -ENOMEM on any failure; partially allocated
 * state is intentionally left for pcnet32_free_ring() to release.
 */
static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
{
	struct pcnet32_private *lp = netdev_priv(dev);

	lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
					   sizeof(struct pcnet32_tx_head) *
					   lp->tx_ring_size,
					   &lp->tx_ring_dma_addr);
	if (lp->tx_ring == NULL) {
		netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
		return -ENOMEM;
	}

	lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
					   sizeof(struct pcnet32_rx_head) *
					   lp->rx_ring_size,
					   &lp->rx_ring_dma_addr);
	if (lp->rx_ring == NULL) {
		netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
		return -ENOMEM;
	}

	lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
				  GFP_ATOMIC);
	if (!lp->tx_dma_addr)
		return -ENOMEM;

	lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
				  GFP_ATOMIC);
	if (!lp->rx_dma_addr)
		return -ENOMEM;

	lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
				GFP_ATOMIC);
	if (!lp->tx_skbuff)
		return -ENOMEM;

	lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
				GFP_ATOMIC);
	if (!lp->rx_skbuff)
		return -ENOMEM;

	return 0;
}
/*
 * Release everything pcnet32_alloc_ring() allocated.  Safe to call on
 * a partially allocated state: kfree(NULL) is a no-op and the ring
 * pointers are checked before pci_free_consistent().
 */
static void pcnet32_free_ring(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);

	/* per-entry bookkeeping arrays */
	kfree(lp->tx_dma_addr);
	lp->tx_dma_addr = NULL;

	kfree(lp->rx_dma_addr);
	lp->rx_dma_addr = NULL;

	kfree(lp->tx_skbuff);
	lp->tx_skbuff = NULL;

	kfree(lp->rx_skbuff);
	lp->rx_skbuff = NULL;

	/* DMA-coherent descriptor rings */
	if (lp->tx_ring) {
		pci_free_consistent(lp->pci_dev,
				    sizeof(struct pcnet32_tx_head) *
				    lp->tx_ring_size, lp->tx_ring,
				    lp->tx_ring_dma_addr);
		lp->tx_ring = NULL;
	}

	if (lp->rx_ring) {
		pci_free_consistent(lp->pci_dev,
				    sizeof(struct pcnet32_rx_head) *
				    lp->rx_ring_size, lp->rx_ring,
				    lp->rx_ring_dma_addr);
		lp->rx_ring = NULL;
	}
}
static int pcnet32_open(struct net_device *dev)
{
struct pcnet32_private *lp = netdev_priv(dev);
struct pci_dev *pdev = lp->pci_dev;
unsigned long ioaddr = dev->base_addr;
u16 val;
int i;
int rc;
unsigned long flags;
if (request_irq(dev->irq, pcnet32_interrupt,
lp->shared_irq ? IRQF_SHARED : 0, dev->name,
(void *)dev)) {
return -EAGAIN;
}
spin_lock_irqsave(&lp->lock, flags);
/* Check for a valid station address */
if (!is_valid_ether_addr(dev->dev_addr)) {
rc = -EINVAL;
goto err_free_irq;
}
/* Reset the PCNET32 */
lp->a->reset(ioaddr);
/* switch pcnet32 to 32bit mode */
lp->a->write_bcr(ioaddr, 20, 2);
netif_printk(lp, ifup, KERN_DEBUG, dev,
"%s() irq %d tx/rx rings %#x/%#x init %#x\n",
__func__, dev->irq, (u32) (lp->tx_ring_dma_addr),
(u32) (lp->rx_ring_dma_addr),
(u32) (lp->init_dma_addr));
/* set/reset autoselect bit */
val = lp->a->read_bcr(ioaddr, 2) & ~2;
if (lp->options & PCNET32_PORT_ASEL)
val |= 2;
lp->a->write_bcr(ioaddr, 2, val);
/* handle full duplex setting */
if (lp->mii_if.full_duplex) {
val = lp->a->read_bcr(ioaddr, 9) & ~3;
if (lp->options & PCNET32_PORT_FD) {
val |= 1;
if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
val |= 2;
} else if (lp->options & PCNET32_PORT_ASEL) {
/* workaround of xSeries250, turn on for 79C975 only */
if (lp->chip_version == 0x2627)
val |= 3;
}
lp->a->write_bcr(ioaddr, 9, val);
}
/* set/reset GPSI bit in test register */
val = lp->a->read_csr(ioaddr, 124) & ~0x10;
if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
val |= 0x10;
lp->a->write_csr(ioaddr, 124, val);
/* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
(pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
if (lp->options & PCNET32_PORT_ASEL) {
lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
netif_printk(lp, link, KERN_DEBUG, dev,
"Setting 100Mb-Full Duplex\n");
}
}
if (lp->phycount < 2) {
/*
* 24 Jun 2004 according AMD, in order to change the PHY,
* DANAS (or DISPM for 79C976) must be set; then select the speed,
* duplex, and/or enable auto negotiation, and clear DANAS
*/
if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
lp->a->write_bcr(ioaddr, 32,
lp->a->read_bcr(ioaddr, 32) | 0x0080);
/* disable Auto Negotiation, set 10Mpbs, HD */
val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
if (lp->options & PCNET32_PORT_FD)
val |= 0x10;
if (lp->options & PCNET32_PORT_100)
val |= 0x08;
lp->a->write_bcr(ioaddr, 32, val);
} else {
if (lp->options & PCNET32_PORT_ASEL) {
lp->a->write_bcr(ioaddr, 32,
lp->a->read_bcr(ioaddr,
32) | 0x0080);
/* enable auto negotiate, setup, disable fd */
val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
val |= 0x20;
lp->a->write_bcr(ioaddr, 32, val);
}
}
} else {
int first_phy = -1;
u16 bmcr;
u32 bcr9;
struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
/*
* There is really no good other way to handle multiple PHYs
* other than turning off all automatics
*/
val = lp->a->read_bcr(ioaddr, 2);
lp->a->write_bcr(ioaddr, 2, val & ~2);
val = lp->a->read_bcr(ioaddr, 32);
lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
if (!(lp->options & PCNET32_PORT_ASEL)) {
/* setup ecmd */
ecmd.port = PORT_MII;
ecmd.transceiver = XCVR_INTERNAL;
ecmd.autoneg = AUTONEG_DISABLE;
ethtool_cmd_speed_set(&ecmd,
(lp->options & PCNET32_PORT_100) ?
SPEED_100 : SPEED_10);
bcr9 = lp->a->read_bcr(ioaddr, 9);
if (lp->options & PCNET32_PORT_FD) {
ecmd.duplex = DUPLEX_FULL;
bcr9 |= (1 << 0);
} else {
ecmd.duplex = DUPLEX_HALF;
bcr9 |= ~(1 << 0);
}
lp->a->write_bcr(ioaddr, 9, bcr9);
}
for (i = 0; i < PCNET32_MAX_PHYS; i++) {
if (lp->phymask & (1 << i)) {
/* isolate all but the first PHY */
bmcr = mdio_read(dev, i, MII_BMCR);
if (first_phy == -1) {
first_phy = i;
mdio_write(dev, i, MII_BMCR,
bmcr & ~BMCR_ISOLATE);
} else {
mdio_write(dev, i, MII_BMCR,
bmcr | BMCR_ISOLATE);
}
/* use mii_ethtool_sset to setup PHY */
lp->mii_if.phy_id = i;
ecmd.phy_address = i;
if (lp->options & PCNET32_PORT_ASEL) {
mii_ethtool_gset(&lp->mii_if, &ecmd);
ecmd.autoneg = AUTONEG_ENABLE;
}
mii_ethtool_sset(&lp->mii_if, &ecmd);
}
}
lp->mii_if.phy_id = first_phy;
netif_info(lp, link, dev, "Using PHY number %d\n", first_phy);
}
#ifdef DO_DXSUFLO
if (lp->dxsuflo) { /* Disable transmit stop on underflow */
val = lp->a->read_csr(ioaddr, CSR3);
val |= 0x40;
lp->a->write_csr(ioaddr, CSR3, val);
}
#endif
lp->init_block->mode =
cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
pcnet32_load_multicast(dev);
if (pcnet32_init_ring(dev)) {
rc = -ENOMEM;
goto err_free_ring;
}
napi_enable(&lp->napi);
/* Re-initialize the PCNET32, and start it when done. */
lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
netif_start_queue(dev);
if (lp->chip_version >= PCNET32_79C970A) {
/* Print the link status and start the watchdog */
pcnet32_check_media(dev, 1);
mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT);
}
i = 0;
while (i++ < 100)
if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
break;
/*
* We used to clear the InitDone bit, 0x0100, here but Mark Stockton
* reports that doing so triggers a bug in the '974.
*/
lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
netif_printk(lp, ifup, KERN_DEBUG, dev,
"pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
i,
(u32) (lp->init_dma_addr),
lp->a->read_csr(ioaddr, CSR0));
spin_unlock_irqrestore(&lp->lock, flags);
return 0; /* Always succeed */
err_free_ring:
/* free any allocated skbuffs */
pcnet32_purge_rx_ring(dev);
/*
* Switch back to 16bit mode to avoid problems with dumb
* DOS packet driver after a warm reboot
*/
lp->a->write_bcr(ioaddr, 20, 4);
err_free_irq:
spin_unlock_irqrestore(&lp->lock, flags);
free_irq(dev->irq, dev);
return rc;
}
/*
* The LANCE has been halted for one reason or another (busmaster memory
* arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
* etc.). Modern LANCE variants always reload their ring-buffer
* configuration when restarted, so we must reinitialize our ring
* context before restarting. As part of this reinitialization,
* find all packets still on the Tx ring and pretend that they had been
* sent (in effect, drop the packets on the floor) - the higher-level
* protocols will time out and retransmit. It'd be better to shuffle
* these skbs to a temp list and then actually re-Tx them after
* restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
*/
/*
 * Reclaim every Tx descriptor, dropping any still-queued skbs.
 * See the comment block above for why dropped packets are acceptable.
 */
static void pcnet32_purge_tx_ring(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int i;

	for (i = 0; i < lp->tx_ring_size; i++) {
		lp->tx_ring[i].status = 0;	/* CPU owns buffer */
		wmb();	/* Make sure adapter sees owner change */
		if (lp->tx_skbuff[i]) {
			pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
					 lp->tx_skbuff[i]->len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb_any(lp->tx_skbuff[i]);
		}
		lp->tx_skbuff[i] = NULL;
		lp->tx_dma_addr[i] = 0;
	}
}
/* Initialize the PCNET32 Rx and Tx rings.
 * Every rx descriptor gets a mapped skb and is handed to the chip
 * (OWN bit set); tx descriptors are returned to CPU ownership; the
 * init block is refreshed with the current ring DMA addresses.
 * Returns 0 on success, -1 if an rx skb cannot be allocated.
 */
static int pcnet32_init_ring(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int i;

	lp->tx_full = 0;
	lp->cur_rx = lp->cur_tx = 0;
	lp->dirty_rx = lp->dirty_tx = 0;

	for (i = 0; i < lp->rx_ring_size; i++) {
		struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
		if (rx_skbuff == NULL) {
			lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB);
			rx_skbuff = lp->rx_skbuff[i];
			if (!rx_skbuff) {
				/* there is not much we can do at this point */
				netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n",
					  __func__);
				return -1;
			}
			skb_reserve(rx_skbuff, NET_IP_ALIGN);
		}

		rmb();
		if (lp->rx_dma_addr[i] == 0)
			lp->rx_dma_addr[i] =
			    pci_map_single(lp->pci_dev, rx_skbuff->data,
					   PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
		lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
		lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
		wmb();	/* Make sure owner changes after all others are visible */
		/* give the descriptor to the chip (OWN bit) */
		lp->rx_ring[i].status = cpu_to_le16(0x8000);
	}
	/* The Tx buffer address is filled in as needed, but we do need to clear
	 * the upper ownership bit. */
	for (i = 0; i < lp->tx_ring_size; i++) {
		lp->tx_ring[i].status = 0;	/* CPU owns buffer */
		wmb();	/* Make sure adapter sees owner change */
		lp->tx_ring[i].base = 0;
		lp->tx_dma_addr[i] = 0;
	}

	lp->init_block->tlen_rlen =
	    cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
	for (i = 0; i < 6; i++)
		lp->init_block->phys_addr[i] = dev->dev_addr[i];
	lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
	lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
	wmb();	/* Make sure all changes are visible */
	return 0;
}
/* the pcnet32 has been issued a stop or reset. Wait for the stop bit
* then flush the pending transmit operations, re-initialize the ring,
* and tell the chip to initialize.
*/
/*
 * Restart a halted chip: wait for the STOP bit, flush pending
 * transmits, re-initialize the rings, then kick off INIT and wait for
 * IDON before writing the requested CSR0 mode bits.
 */
static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;
	int i;

	/* wait for stop (bounded at 100 register reads) */
	for (i = 0; i < 100; i++)
		if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
			break;
	if (i >= 100)
		netif_err(lp, drv, dev, "%s timed out waiting for stop\n",
			  __func__);

	pcnet32_purge_tx_ring(dev);
	if (pcnet32_init_ring(dev))
		return;

	/* ReInit Ring */
	lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
	for (i = 0; i < 1000; i++)
		if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
			break;

	lp->a->write_csr(ioaddr, CSR0, csr0_bits);
}
/*
 * net_device tx_timeout hook: the transmitter stalled.  Stop the chip,
 * optionally dump both descriptor rings for debugging, then restart
 * the chip and wake the queue.
 */
static void pcnet32_tx_timeout(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr, flags;

	spin_lock_irqsave(&lp->lock, flags);
	/* Transmitter timeout, serious problems. */
	if (pcnet32_debug & NETIF_MSG_DRV)
		pr_err("%s: transmit timed out, status %4.4x, resetting\n",
		       dev->name, lp->a->read_csr(ioaddr, CSR0));
	lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
	dev->stats.tx_errors++;
	if (netif_msg_tx_err(lp)) {
		int i;
		printk(KERN_DEBUG
		       " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
		       lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
		       lp->cur_rx);
		/* buf_length/length are stored negated -- print positive */
		for (i = 0; i < lp->rx_ring_size; i++)
			printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
			       le32_to_cpu(lp->rx_ring[i].base),
			       (-le16_to_cpu(lp->rx_ring[i].buf_length)) &
			       0xffff, le32_to_cpu(lp->rx_ring[i].msg_length),
			       le16_to_cpu(lp->rx_ring[i].status));
		for (i = 0; i < lp->tx_ring_size; i++)
			printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
			       le32_to_cpu(lp->tx_ring[i].base),
			       (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
			       le32_to_cpu(lp->tx_ring[i].misc),
			       le16_to_cpu(lp->tx_ring[i].status));
		printk("\n");
	}
	pcnet32_restart(dev, CSR0_NORMAL);

	dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_wake_queue(dev);

	spin_unlock_irqrestore(&lp->lock, flags);
}
/* Queue one skb for transmission: fill the next tx descriptor, map the
 * buffer for DMA, hand ownership to the chip last (after a write
 * barrier), and trigger a tx poll.  Stops the queue when the following
 * ring slot is still in use. */
static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long ioaddr = dev->base_addr;
u16 status;
int entry;
unsigned long flags;
spin_lock_irqsave(&lp->lock, flags);
netif_printk(lp, tx_queued, KERN_DEBUG, dev,
"%s() called, csr0 %4.4x\n",
__func__, lp->a->read_csr(ioaddr, CSR0));
/* Default status -- will not enable Successful-TxDone
 * interrupt when that option is available to us.
 */
status = 0x8300;
/* Fill in a Tx ring entry */
/* Mask to ring buffer boundary. */
entry = lp->cur_tx & lp->tx_mod_mask;
/* Caution: the write order is important here, set the status
 * with the "ownership" bits last. */
lp->tx_ring[entry].length = cpu_to_le16(-skb->len);
lp->tx_ring[entry].misc = 0x00000000;
lp->tx_skbuff[entry] = skb;
lp->tx_dma_addr[entry] =
pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
wmb(); /* Make sure owner changes after all others are visible */
lp->tx_ring[entry].status = cpu_to_le16(status);
lp->cur_tx++;
dev->stats.tx_bytes += skb->len;
/* Trigger an immediate send poll. */
lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
/* A non-zero base in the next slot means it is still occupied. */
if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
lp->tx_full = 1;
netif_stop_queue(dev);
}
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
}
/* The PCNET32 interrupt handler. */
/* Acknowledges and services CSR0 interrupt causes for up to
 * max_interrupt_work iterations, counting misc errors, then defers
 * rx/tx processing to NAPI with rx/tx interrupts masked via CSR3. */
static irqreturn_t
pcnet32_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct pcnet32_private *lp;
unsigned long ioaddr;
u16 csr0;
int boguscnt = max_interrupt_work;
ioaddr = dev->base_addr;
lp = netdev_priv(dev);
spin_lock(&lp->lock);
csr0 = lp->a->read_csr(ioaddr, CSR0);
while ((csr0 & 0x8f00) && --boguscnt >= 0) {
/* All-ones read means the hardware is no longer present. */
if (csr0 == 0xffff)
break; /* PCMCIA remove happened */
/* Acknowledge all of the current interrupt sources ASAP. */
lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
netif_printk(lp, intr, KERN_DEBUG, dev,
"interrupt csr0=%#2.2x new csr=%#2.2x\n",
csr0, lp->a->read_csr(ioaddr, CSR0));
/* Log misc errors. */
if (csr0 & 0x4000)
dev->stats.tx_errors++; /* Tx babble. */
if (csr0 & 0x1000) {
/*
 * This happens when our receive ring is full. This
 * shouldn't be a problem as we will see normal rx
 * interrupts for the frames in the receive ring. But
 * there are some PCI chipsets (I can reproduce this
 * on SP3G with Intel saturn chipset) which have
 * sometimes problems and will fill up the receive
 * ring with error descriptors. In this situation we
 * don't get a rx interrupt, but a missed frame
 * interrupt sooner or later.
 */
dev->stats.rx_errors++; /* Missed a Rx frame. */
}
if (csr0 & 0x0800) {
netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n",
csr0);
/* unlike for the lance, there is no restart needed */
}
if (napi_schedule_prep(&lp->napi)) {
u16 val;
/* set interrupt masks */
val = lp->a->read_csr(ioaddr, CSR3);
val |= 0x5f00;
lp->a->write_csr(ioaddr, CSR3, val);
__napi_schedule(&lp->napi);
break;
}
csr0 = lp->a->read_csr(ioaddr, CSR0);
}
netif_printk(lp, intr, KERN_DEBUG, dev,
"exiting interrupt, csr0=%#4.4x\n",
lp->a->read_csr(ioaddr, CSR0));
spin_unlock(&lp->lock);
return IRQ_HANDLED;
}
/* Shut the interface down: kill the watchdog timer, stop the queue and
 * NAPI, stop the chip, switch back to 16-bit mode, free the IRQ (note
 * the lock is dropped around free_irq()), then purge both rings. */
static int pcnet32_close(struct net_device *dev)
{
unsigned long ioaddr = dev->base_addr;
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long flags;
del_timer_sync(&lp->watchdog_timer);
netif_stop_queue(dev);
napi_disable(&lp->napi);
spin_lock_irqsave(&lp->lock, flags);
/* CSR112 is the hardware missed-frame counter. */
dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
netif_printk(lp, ifdown, KERN_DEBUG, dev,
"Shutting down ethercard, status was %2.2x\n",
lp->a->read_csr(ioaddr, CSR0));
/* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
/*
 * Switch back to 16bit mode to avoid problems with dumb
 * DOS packet driver after a warm reboot
 */
lp->a->write_bcr(ioaddr, 20, 4);
spin_unlock_irqrestore(&lp->lock, flags);
free_irq(dev->irq, dev);
spin_lock_irqsave(&lp->lock, flags);
pcnet32_purge_rx_ring(dev);
pcnet32_purge_tx_ring(dev);
spin_unlock_irqrestore(&lp->lock, flags);
return 0;
}
/* Return the device statistics, refreshing the missed-frame count from
 * the hardware (CSR112) under lp->lock first. */
static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long base = dev->base_addr;
	unsigned long irq_flags;

	spin_lock_irqsave(&lp->lock, irq_flags);
	dev->stats.rx_missed_errors = lp->a->read_csr(base, 112);
	spin_unlock_irqrestore(&lp->lock, irq_flags);

	return &dev->stats;
}
/* taken from the sunlance driver, which it took from the depca driver */
/* Rebuild the 64-bit logical-address filter in the init block and mirror
 * it into the chip registers (PCNET32_MC_FILTER..+3).  IFF_ALLMULTI sets
 * every bit; otherwise each multicast address is hashed via the top six
 * bits of its little-endian CRC. */
static void pcnet32_load_multicast(struct net_device *dev)
{
struct pcnet32_private *lp = netdev_priv(dev);
volatile struct pcnet32_init_block *ib = lp->init_block;
volatile __le16 *mcast_table = (__le16 *)ib->filter;
struct netdev_hw_addr *ha;
unsigned long ioaddr = dev->base_addr;
int i;
u32 crc;
/* set all multicast bits */
if (dev->flags & IFF_ALLMULTI) {
ib->filter[0] = cpu_to_le32(~0U);
ib->filter[1] = cpu_to_le32(~0U);
lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
return;
}
/* clear the multicast filter */
ib->filter[0] = 0;
ib->filter[1] = 0;
/* Add addresses */
netdev_for_each_mc_addr(ha, dev) {
crc = ether_crc_le(6, ha->addr);
crc = crc >> 26;
/* High 2 bits select the 16-bit word, low 4 bits the bit. */
mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
}
/* Copy the in-memory table into the chip's filter registers. */
for (i = 0; i < 4; i++)
lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
le16_to_cpu(mcast_table[i]));
}
/*
 * Set or clear the multicast filter for this adaptor.
 *
 * Tries to SUSPEND the chip (pcnet32_suspend) so the filter can be
 * changed live; when that fails, falls back to a full stop/restart.
 * Runs under lp->lock for the whole operation.
 */
static void pcnet32_set_multicast_list(struct net_device *dev)
{
unsigned long ioaddr = dev->base_addr, flags;
struct pcnet32_private *lp = netdev_priv(dev);
int csr15, suspended;
spin_lock_irqsave(&lp->lock, flags);
suspended = pcnet32_suspend(dev, &flags, 0);
csr15 = lp->a->read_csr(ioaddr, CSR15);
if (dev->flags & IFF_PROMISC) {
/* Log any net taps. */
netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
/* Bit 15 of the init-block mode word is PROM (promiscuous). */
lp->init_block->mode =
cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
7);
lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
} else {
lp->init_block->mode =
cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
pcnet32_load_multicast(dev);
}
if (suspended) {
int csr5;
/* clear SUSPEND (SPND) - CSR5 bit 0 */
csr5 = lp->a->read_csr(ioaddr, CSR5);
lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
} else {
/* Suspend failed: full stop + restart applies the new filter. */
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
pcnet32_restart(dev, CSR0_NORMAL);
netif_wake_queue(dev);
}
spin_unlock_irqrestore(&lp->lock, flags);
}
/* Read MII register @reg_num of PHY @phy_id.
 * This routine assumes that the lp->lock is held.
 * Returns 0 when the chip has no MII interface. */
static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long base = dev->base_addr;
	u16 data;

	if (!lp->mii)
		return 0;

	/* BCR33 selects the PHY/register pair, BCR34 returns the data. */
	lp->a->write_bcr(base, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
	data = lp->a->read_bcr(base, 34);
	return data;
}
/* Write @val to MII register @reg_num of PHY @phy_id.
 * This routine assumes that the lp->lock is held.
 * No-op when the chip has no MII interface. */
static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long base = dev->base_addr;

	if (!lp->mii)
		return;

	/* BCR33 selects the PHY/register pair, BCR34 carries the data. */
	lp->a->write_bcr(base, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
	lp->a->write_bcr(base, 34, val);
}
/* Device ioctl entry point: only the MII ioctls (SIOC[GS]MIIxxx) are
 * supported, and only when the chip has an MII interface.  The generic
 * MII handler runs under lp->lock. */
static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long irq_flags;
	int rc;

	if (!lp->mii)
		return -EOPNOTSUPP;

	spin_lock_irqsave(&lp->lock, irq_flags);
	rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irqrestore(&lp->lock, irq_flags);

	return rc;
}
/* Scan lp->phymask for another PHY with an active link.  If one is
 * found, isolate the currently selected PHY, de-isolate the new one,
 * and make it the active PHY.  Returns 1 when a switch happened, else
 * 0.  Probing uses a local copy of lp->mii_if so the live state is not
 * disturbed until a link is actually found. */
static int pcnet32_check_otherphy(struct net_device *dev)
{
struct pcnet32_private *lp = netdev_priv(dev);
struct mii_if_info mii = lp->mii_if;
u16 bmcr;
int i;
for (i = 0; i < PCNET32_MAX_PHYS; i++) {
if (i == lp->mii_if.phy_id)
continue; /* skip active phy */
if (lp->phymask & (1 << i)) {
mii.phy_id = i;
if (mii_link_ok(&mii)) {
/* found PHY with active link */
netif_info(lp, link, dev, "Using PHY number %d\n",
i);
/* isolate inactive phy */
bmcr =
mdio_read(dev, lp->mii_if.phy_id, MII_BMCR);
mdio_write(dev, lp->mii_if.phy_id, MII_BMCR,
bmcr | BMCR_ISOLATE);
/* de-isolate new phy */
bmcr = mdio_read(dev, i, MII_BMCR);
mdio_write(dev, i, MII_BMCR,
bmcr & ~BMCR_ISOLATE);
/* set new phy address */
lp->mii_if.phy_id = i;
return 1;
}
}
}
return 0;
}
/*
 * Show the status of the media. Similar to mii_check_media however it
 * correctly shows the link speed for all (tested) pcnet32 variants.
 * Devices with no mii just report link state without speed.
 *
 * Caller is assumed to hold and release the lp->lock.
 */
static void pcnet32_check_media(struct net_device *dev, int verbose)
{
struct pcnet32_private *lp = netdev_priv(dev);
int curr_link;
int prev_link = netif_carrier_ok(dev) ? 1 : 0;
u32 bcr9;
if (lp->mii) {
curr_link = mii_link_ok(&lp->mii_if);
} else {
ulong ioaddr = dev->base_addr; /* card base I/O address */
/* No MII: BCR4 reading 0xc0 means no link. */
curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
}
if (!curr_link) {
if (prev_link || verbose) {
netif_carrier_off(dev);
netif_info(lp, link, dev, "link down\n");
}
/* With multiple PHYs, try to fail over to one with a link. */
if (lp->phycount > 1) {
curr_link = pcnet32_check_otherphy(dev);
prev_link = 0;
}
} else if (verbose || !prev_link) {
netif_carrier_on(dev);
if (lp->mii) {
if (netif_msg_link(lp)) {
struct ethtool_cmd ecmd = {
.cmd = ETHTOOL_GSET };
mii_ethtool_gset(&lp->mii_if, &ecmd);
netdev_info(dev, "link up, %uMbps, %s-duplex\n",
ethtool_cmd_speed(&ecmd),
(ecmd.duplex == DUPLEX_FULL)
? "full" : "half");
}
/* Keep BCR9 bit 0 (full-duplex) in sync with the MII state. */
bcr9 = lp->a->read_bcr(dev->base_addr, 9);
if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
if (lp->mii_if.full_duplex)
bcr9 |= (1 << 0);
else
bcr9 &= ~(1 << 0);
lp->a->write_bcr(dev->base_addr, 9, bcr9);
}
} else {
netif_info(lp, link, dev, "link up\n");
}
}
}
/*
 * Periodic link monitor.  mii_check_media() cannot be used because it
 * does nothing when the mode is forced; report link changes through
 * pcnet32_check_media() instead, then re-arm the watchdog timer.
 */
static void pcnet32_watchdog(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned long irq_flags;

	/* Print the link status if it has changed. */
	spin_lock_irqsave(&lp->lock, irq_flags);
	pcnet32_check_media(dev, 0);
	spin_unlock_irqrestore(&lp->lock, irq_flags);

	mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT));
}
/* Legacy PCI suspend hook: quiesce a running interface, then save PCI
 * config space and drop into the requested power state. */
static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev)) {
		netif_device_detach(dev);
		pcnet32_close(dev);
	}

	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
/* Legacy PCI resume hook: restore power/config state, then reopen and
 * reattach the interface if it was running at suspend time. */
static int pcnet32_pm_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		pcnet32_open(dev);
		netif_device_attach(dev);
	}

	return 0;
}
/* PCI remove hook: unregister the netdev and release every resource the
 * probe acquired (rings, I/O region, init block DMA memory). */
static void pcnet32_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct pcnet32_private *lp;

	if (!dev)
		return;

	lp = netdev_priv(dev);
	unregister_netdev(dev);
	pcnet32_free_ring(dev);
	release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
	pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
			    lp->init_block, lp->init_dma_addr);
	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
/* PCI glue: probe/remove plus legacy (pre-dev_pm_ops) suspend/resume. */
static struct pci_driver pcnet32_driver = {
.name = DRV_NAME,
.probe = pcnet32_probe_pci,
.remove = pcnet32_remove_one,
.id_table = pcnet32_pci_tbl,
.suspend = pcnet32_pm_suspend,
.resume = pcnet32_pm_resume,
};
/* An additional parameter that may be passed in... */
static int debug = -1; /* -1 = use PCNET32_MSG_DEFAULT */
static int tx_start_pt = -1; /* -1 = keep the built-in tx_start */
static int pcnet32_have_pci; /* set when pci_register_driver succeeded */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, DRV_NAME " debug level");
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work,
DRV_NAME " maximum events handled per interrupt");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak,
DRV_NAME " copy breakpoint for copy-only-tiny-frames");
module_param(tx_start_pt, int, 0);
MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
module_param(pcnet32vlb, int, 0);
MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)");
module_param_array(options, int, NULL, 0);
MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)");
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */
/* NOTE(review): the description text below is missing its closing ")"
 * after "default Ethernet" -- cosmetic only. */
module_param_array(homepna, int, NULL, 0);
MODULE_PARM_DESC(homepna,
DRV_NAME
" mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet");
MODULE_AUTHOR("Thomas Bogendoerfer");
MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards");
MODULE_LICENSE("GPL");
#define PCNET32_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
/* Module entry point: set up debug level, register the PCI driver, and
 * optionally probe VLB cards.  Succeeds when PCI registration worked or
 * at least one card was found. */
static int __init pcnet32_init_module(void)
{
pr_info("%s", version);
pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
/* Only accept a tx start point override inside its valid range. */
if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
tx_start = tx_start_pt;
/* find the PCI devices */
if (!pci_register_driver(&pcnet32_driver))
pcnet32_have_pci = 1;
/* should we find any remaining VLbus devices ? */
if (pcnet32vlb)
pcnet32_probe_vlbus(pcnet32_portlist);
if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
pr_info("%d cards_found\n", cards_found);
return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
}
/* Module exit: tear down every non-PCI device on the pcnet32_dev list,
 * then unregister the PCI driver if it was registered. */
static void __exit pcnet32_cleanup_module(void)
{
struct net_device *next_dev;
while (pcnet32_dev) {
struct pcnet32_private *lp = netdev_priv(pcnet32_dev);
/* Save the list link before the device is freed. */
next_dev = lp->next;
unregister_netdev(pcnet32_dev);
pcnet32_free_ring(pcnet32_dev);
release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
lp->init_block, lp->init_dma_addr);
free_netdev(pcnet32_dev);
pcnet32_dev = next_dev;
}
if (pcnet32_have_pci)
pci_unregister_driver(&pcnet32_driver);
}
/* Module entry/exit registration. */
module_init(pcnet32_init_module);
module_exit(pcnet32_cleanup_module);
/*
* Local variables:
* c-indent-level: 4
* tab-width: 8
* End:
*/
| gpl-2.0 |
NAM-IL/LINUX-rpi-4.2.y | drivers/media/pci/solo6x10/solo6x10-disp.c | 1884 | 9847 | /*
* Copyright (C) 2010-2013 Bluecherry, LLC <http://www.bluecherrydvr.com>
*
* Original author:
* Ben Collins <bcollins@ubuntu.com>
*
* Additional work by:
* John Brooks <john.brooks@bluecherry.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/videodev2.h>
#include <media/v4l2-ioctl.h>
#include "solo6x10.h"
#define SOLO_VCLK_DELAY 3
#define SOLO_PROGRESSIVE_VSIZE 1024
#define SOLO_MOT_THRESH_W 64
#define SOLO_MOT_THRESH_H 64
#define SOLO_MOT_THRESH_SIZE 8192
#define SOLO_MOT_THRESH_REAL (SOLO_MOT_THRESH_W * SOLO_MOT_THRESH_H)
#define SOLO_MOT_FLAG_SIZE 1024
#define SOLO_MOT_FLAG_AREA (SOLO_MOT_FLAG_SIZE * 16)
/* Program the video-input path: VCLK input delays, interlaced and
 * progressive active-window geometry, channel format, and the NTSC/PAL
 * playback timing. */
static void solo_vin_config(struct solo_dev *solo_dev)
{
solo_dev->vin_hstart = 8;
solo_dev->vin_vstart = 2;
/* Same VCLK delay on all eight input pairs. */
solo_reg_write(solo_dev, SOLO_SYS_VCLK,
SOLO_VCLK_SELECT(2) |
SOLO_VCLK_VIN1415_DELAY(SOLO_VCLK_DELAY) |
SOLO_VCLK_VIN1213_DELAY(SOLO_VCLK_DELAY) |
SOLO_VCLK_VIN1011_DELAY(SOLO_VCLK_DELAY) |
SOLO_VCLK_VIN0809_DELAY(SOLO_VCLK_DELAY) |
SOLO_VCLK_VIN0607_DELAY(SOLO_VCLK_DELAY) |
SOLO_VCLK_VIN0405_DELAY(SOLO_VCLK_DELAY) |
SOLO_VCLK_VIN0203_DELAY(SOLO_VCLK_DELAY) |
SOLO_VCLK_VIN0001_DELAY(SOLO_VCLK_DELAY));
solo_reg_write(solo_dev, SOLO_VI_ACT_I_P,
SOLO_VI_H_START(solo_dev->vin_hstart) |
SOLO_VI_V_START(solo_dev->vin_vstart) |
SOLO_VI_V_STOP(solo_dev->vin_vstart +
solo_dev->video_vsize));
solo_reg_write(solo_dev, SOLO_VI_ACT_I_S,
SOLO_VI_H_START(solo_dev->vout_hstart) |
SOLO_VI_V_START(solo_dev->vout_vstart) |
SOLO_VI_V_STOP(solo_dev->vout_vstart +
solo_dev->video_vsize));
solo_reg_write(solo_dev, SOLO_VI_ACT_P,
SOLO_VI_H_START(0) |
SOLO_VI_V_START(1) |
SOLO_VI_V_STOP(SOLO_PROGRESSIVE_VSIZE));
solo_reg_write(solo_dev, SOLO_VI_CH_FORMAT,
SOLO_VI_FD_SEL_MASK(0) | SOLO_VI_PROG_MASK(0));
/* On 6110, initialize mosaic darkness strength (6010 writes 0). */
if (solo_dev->type == SOLO_DEV_6010)
solo_reg_write(solo_dev, SOLO_VI_FMT_CFG, 0);
else
solo_reg_write(solo_dev, SOLO_VI_FMT_CFG, 16 << 22);
solo_reg_write(solo_dev, SOLO_VI_PAGE_SW, 2);
/* Playback timing differs between NTSC (858x246, 240 active lines)
 * and PAL (864x294, 288 active lines). */
if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC) {
solo_reg_write(solo_dev, SOLO_VI_PB_CONFIG,
SOLO_VI_PB_USER_MODE);
solo_reg_write(solo_dev, SOLO_VI_PB_RANGE_HV,
SOLO_VI_PB_HSIZE(858) | SOLO_VI_PB_VSIZE(246));
solo_reg_write(solo_dev, SOLO_VI_PB_ACT_V,
SOLO_VI_PB_VSTART(4) |
SOLO_VI_PB_VSTOP(4 + 240));
} else {
solo_reg_write(solo_dev, SOLO_VI_PB_CONFIG,
SOLO_VI_PB_USER_MODE | SOLO_VI_PB_PAL);
solo_reg_write(solo_dev, SOLO_VI_PB_RANGE_HV,
SOLO_VI_PB_HSIZE(864) | SOLO_VI_PB_VSIZE(294));
solo_reg_write(solo_dev, SOLO_VI_PB_ACT_V,
SOLO_VI_PB_VSTART(4) |
SOLO_VI_PB_VSTOP(4 + 288));
}
solo_reg_write(solo_dev, SOLO_VI_PB_ACT_H, SOLO_VI_PB_HSTART(16) |
SOLO_VI_PB_HSTOP(16 + 720));
}
/* Initialize the hardware cursor: blank 2bpp bitmap mask, home position,
 * and default YCbCr colors for both cursor color registers. */
static void solo_vout_config_cursor(struct solo_dev *dev)
{
	int idx;

	/* Load (blank) cursor bitmap mask (2bpp) */
	for (idx = 0; idx < 20; idx++)
		solo_reg_write(dev, SOLO_VO_CURSOR_MASK(idx), 0);

	solo_reg_write(dev, SOLO_VO_CURSOR_POS, 0);
	solo_reg_write(dev, SOLO_VO_CURSOR_CLR,
		       (0x80 << 24) | (0x80 << 16) | (0x10 << 8) | 0x80);
	solo_reg_write(dev, SOLO_VO_CURSOR_CLR2, (0xe0 << 8) | 0x80);
}
/* Program the video-output path: encoder format, active window and
 * range, border/background colors, cursor, and finally enable the
 * display and the per-channel inputs. */
static void solo_vout_config(struct solo_dev *solo_dev)
{
solo_dev->vout_hstart = 6;
solo_dev->vout_vstart = 8;
solo_reg_write(solo_dev, SOLO_VO_FMT_ENC,
solo_dev->video_type |
SOLO_VO_USER_COLOR_SET_NAV |
SOLO_VO_USER_COLOR_SET_NAH |
SOLO_VO_NA_COLOR_Y(0) |
SOLO_VO_NA_COLOR_CB(0) |
SOLO_VO_NA_COLOR_CR(0));
solo_reg_write(solo_dev, SOLO_VO_ACT_H,
SOLO_VO_H_START(solo_dev->vout_hstart) |
SOLO_VO_H_STOP(solo_dev->vout_hstart +
solo_dev->video_hsize));
solo_reg_write(solo_dev, SOLO_VO_ACT_V,
SOLO_VO_V_START(solo_dev->vout_vstart) |
SOLO_VO_V_STOP(solo_dev->vout_vstart +
solo_dev->video_vsize));
solo_reg_write(solo_dev, SOLO_VO_RANGE_HV,
SOLO_VO_H_LEN(solo_dev->video_hsize) |
SOLO_VO_V_LEN(solo_dev->video_vsize));
/* Border & background colors (packed YCbCr values) */
solo_reg_write(solo_dev, SOLO_VO_BORDER_LINE_COLOR,
(0xa0 << 24) | (0x88 << 16) | (0xa0 << 8) | 0x88);
solo_reg_write(solo_dev, SOLO_VO_BORDER_FILL_COLOR,
(0x10 << 24) | (0x8f << 16) | (0x10 << 8) | 0x8f);
solo_reg_write(solo_dev, SOLO_VO_BKG_COLOR,
(16 << 24) | (128 << 16) | (16 << 8) | 128);
solo_reg_write(solo_dev, SOLO_VO_DISP_ERASE, SOLO_VO_DISP_ERASE_ON);
solo_reg_write(solo_dev, SOLO_VI_WIN_SW, 0);
solo_reg_write(solo_dev, SOLO_VO_ZOOM_CTRL, 0);
solo_reg_write(solo_dev, SOLO_VO_FREEZE_CTRL, 0);
solo_reg_write(solo_dev, SOLO_VO_DISP_CTRL, SOLO_VO_DISP_ON |
SOLO_VO_DISP_ERASE_COUNT(8) |
SOLO_VO_DISP_BASE(SOLO_DISP_EXT_ADDR));
solo_vout_config_cursor(solo_dev);
/* Enable channels we support */
solo_reg_write(solo_dev, SOLO_VI_CH_ENA,
(1 << solo_dev->nr_chans) - 1);
}
/* Fill @reg_size bytes of external motion memory at offset @off with the
 * repeated 16-bit value @val, DMA-ing a 64-entry (128-byte) bounce
 * buffer one chunk at a time.  Returns 0 on success, -ENOMEM on
 * allocation failure, or the first solo_p2m_dma() error. */
static int solo_dma_vin_region(struct solo_dev *solo_dev, u32 off,
u16 val, int reg_size)
{
__le16 *buf;
const int n = 64, size = n * sizeof(*buf);
int i, ret = 0;
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
for (i = 0; i < n; i++)
buf[i] = cpu_to_le16(val);
for (i = 0; i < reg_size; i += size) {
ret = solo_p2m_dma(solo_dev, 1, buf,
SOLO_MOTION_EXT_ADDR(solo_dev) + off + i,
size, 0, 0);
if (ret)
break;
}
kfree(buf);
return ret;
}
/* Fill channel @ch's motion-threshold table with @val.
 * NOTE(review): the bounds check accepts ch == nr_chans even though
 * channels appear 0-based elsewhere (solo_motion_config loops
 * i < nr_chans); confirm whether this should be ">=". */
int solo_set_motion_threshold(struct solo_dev *solo_dev, u8 ch, u16 val)
{
if (ch > solo_dev->nr_chans)
return -EINVAL;
return solo_dma_vin_region(solo_dev, SOLO_MOT_FLAG_AREA +
(ch * SOLO_MOT_THRESH_SIZE * 2),
val, SOLO_MOT_THRESH_SIZE);
}
/* Write a per-block SOLO_MOTION_SZ x SOLO_MOTION_SZ threshold table for
 * channel @ch, DMA-ing one row (64 u16 slots) at a time.
 * NOTE(review): errors from solo_p2m_dma() are OR-ed together, so a
 * non-zero return may not be a single valid -errno. */
int solo_set_motion_block(struct solo_dev *solo_dev, u8 ch,
const u16 *thresholds)
{
const unsigned size = sizeof(u16) * 64;
u32 off = SOLO_MOT_FLAG_AREA + ch * SOLO_MOT_THRESH_SIZE * 2;
__le16 *buf;
int x, y;
int ret = 0;
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
for (y = 0; y < SOLO_MOTION_SZ; y++) {
/* Convert one row to little-endian, then push it out. */
for (x = 0; x < SOLO_MOTION_SZ; x++)
buf[x] = cpu_to_le16(thresholds[y * SOLO_MOTION_SZ + x]);
ret |= solo_p2m_dma(solo_dev, 1, buf,
SOLO_MOTION_EXT_ADDR(solo_dev) + off + y * size,
size, 0, 0);
}
kfree(buf);
return ret;
}
/* First 8k is motion flag (512 bytes * 16). Following that is an 8k+8k
 * threshold and working table for each channel. Atleast that's what the
 * spec says. However, this code (taken from rdk) has some mystery 8k
 * block right after the flag area, before the first thresh table. */
static void solo_motion_config(struct solo_dev *solo_dev)
{
int i;
for (i = 0; i < solo_dev->nr_chans; i++) {
/* Clear motion flag area */
solo_dma_vin_region(solo_dev, i * SOLO_MOT_FLAG_SIZE, 0x0000,
SOLO_MOT_FLAG_SIZE);
/* Clear working cache table */
solo_dma_vin_region(solo_dev, SOLO_MOT_FLAG_AREA +
(i * SOLO_MOT_THRESH_SIZE * 2) +
SOLO_MOT_THRESH_SIZE, 0x0000,
SOLO_MOT_THRESH_SIZE);
/* Set default threshold table */
solo_set_motion_threshold(solo_dev, i, SOLO_DEF_MOT_THRESH);
}
/* Default motion settings: detection disabled, sampling geometry
 * derived from the configured horizontal size. */
solo_reg_write(solo_dev, SOLO_VI_MOT_ADR, SOLO_VI_MOTION_EN(0) |
(SOLO_MOTION_EXT_ADDR(solo_dev) >> 16));
solo_reg_write(solo_dev, SOLO_VI_MOT_CTRL,
SOLO_VI_MOTION_FRAME_COUNT(3) |
SOLO_VI_MOTION_SAMPLE_LENGTH(solo_dev->video_hsize / 16)
/* | SOLO_VI_MOTION_INTR_START_STOP */
| SOLO_VI_MOTION_SAMPLE_COUNT(10));
solo_reg_write(solo_dev, SOLO_VI_MOTION_BORDER, 0);
solo_reg_write(solo_dev, SOLO_VI_MOTION_BAR, 0);
}
/* Bring up the display pipeline: pick NTSC/PAL geometry, configure the
 * input, motion and output blocks, then enable every channel window.
 * Always returns 0. */
int solo_disp_init(struct solo_dev *solo_dev)
{
	int ch;

	/* 704 pixels wide for both standards; height and frame rate
	 * depend on the video norm. */
	solo_dev->video_hsize = 704;
	if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC) {
		solo_dev->video_vsize = 240;
		solo_dev->fps = 30;
	} else {
		solo_dev->video_vsize = 288;
		solo_dev->fps = 25;
	}

	solo_vin_config(solo_dev);
	solo_motion_config(solo_dev);
	solo_vout_config(solo_dev);

	for (ch = 0; ch < solo_dev->nr_chans; ch++)
		solo_reg_write(solo_dev, SOLO_VI_WIN_ON(ch), 1);

	return 0;
}
/* Quiesce the display pipeline: disable output, zoom and freeze, turn
 * off every channel window, and clear the border and both rectangle
 * overlay register sets. */
void solo_disp_exit(struct solo_dev *solo_dev)
{
int i;
solo_reg_write(solo_dev, SOLO_VO_DISP_CTRL, 0);
solo_reg_write(solo_dev, SOLO_VO_ZOOM_CTRL, 0);
solo_reg_write(solo_dev, SOLO_VO_FREEZE_CTRL, 0);
for (i = 0; i < solo_dev->nr_chans; i++) {
solo_reg_write(solo_dev, SOLO_VI_WIN_CTRL0(i), 0);
solo_reg_write(solo_dev, SOLO_VI_WIN_CTRL1(i), 0);
solo_reg_write(solo_dev, SOLO_VI_WIN_ON(i), 0);
}
/* Set default border */
for (i = 0; i < 5; i++)
solo_reg_write(solo_dev, SOLO_VO_BORDER_X(i), 0);
for (i = 0; i < 5; i++)
solo_reg_write(solo_dev, SOLO_VO_BORDER_Y(i), 0);
solo_reg_write(solo_dev, SOLO_VO_BORDER_LINE_MASK, 0);
solo_reg_write(solo_dev, SOLO_VO_BORDER_FILL_MASK, 0);
solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_CTRL(0), 0);
solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_START(0), 0);
solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_STOP(0), 0);
solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_CTRL(1), 0);
solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_START(1), 0);
solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_STOP(1), 0);
}
| gpl-2.0 |
frankiek3/android_kernel_samsung_intercept | drivers/staging/rtl8192u/r819xU_cmdpkt.c | 2140 | 22538 | /******************************************************************************
(c) Copyright 2008, RealTEK Technologies Inc. All Rights Reserved.
Module: r819xusb_cmdpkt.c (RTL8190 TX/RX command packet handler Source C File)
Note: The module is responsible for handling TX and RX command packet.
1. TX : Send set and query configuration command packet.
2. RX : Receive tx feedback, beacon state, query configuration
command packet.
Function:
Export:
Abbrev:
History:
Data Who Remark
05/06/2008 amy Create initial version porting from windows driver.
******************************************************************************/
#include "r8192U.h"
#include "r819xU_cmdpkt.h"
/*---------------------------Define Local Constant---------------------------*/
/* Debug constant*/
#define CMPK_DEBOUNCE_CNT 1
/* 2007/10/24 MH Add for printing a range of data. */
/* Debug helper: copy 40 bytes from Address and print selected words.
 * NOTE(review): the loop indexes the u32 array temp[] with a BYTE step
 * (i += 4 up to 40), so temp[12..36] reads past the 10-element array --
 * confirm intent; the macro appears unused in this file.
 * NOTE(review): the trailing '\' after the final '}' splices the next
 * source line into the macro; harmless while that line is a comment,
 * but fragile. */
#define CMPK_PRINT(Address)\
{\
unsigned char i;\
u32 temp[10];\
\
memcpy(temp, Address, 40);\
for (i = 0; i <40; i+=4)\
printk("\r\n %08x", temp[i]);\
}\
/*---------------------------Define functions---------------------------------*/
/*
 * Queue a TX command packet to the firmware command queue (TXCMD_QUEUE).
 *
 * dev:     network device owning the command queue
 * pData:   command payload, copied into a freshly allocated skb
 * DataLen: payload length in bytes
 *
 * Returns RT_STATUS_SUCCESS when the packet was queued or handed to
 * softmac_hard_start_xmit(), RT_STATUS_FAILURE on allocation failure.
 */
rt_status
SendTxCommandPacket(
	struct net_device *dev,
	void *pData,
	u32 DataLen
	)
{
	rt_status rtStatus = RT_STATUS_SUCCESS;
	struct r8192_priv *priv = ieee80211_priv(dev);
	struct sk_buff *skb;
	cb_desc *tcb_desc;
	unsigned char *ptr_buf;

	/* Get TCB and local buffer from common pool.
	 * (It is shared by CmdQ, MgntQ, and USB coalesce DataQ)
	 * Fix: the allocation result was previously dereferenced without a
	 * NULL check, crashing under memory pressure. */
	skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4);
	if (!skb)
		return RT_STATUS_FAILURE;
	memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
	tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
	tcb_desc->queue_index = TXCMD_QUEUE;
	tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_NORMAL;
	tcb_desc->bLastIniPkt = 0;
	skb_reserve(skb, USB_HWDESC_HEADER_LEN);
	ptr_buf = skb_put(skb, DataLen);
	memcpy(ptr_buf, pData, DataLen);
	tcb_desc->txbuf_size = (u16)DataLen;

	/* Defer to the wait queue if the HW ring is full, the wait queue
	 * already holds packets (preserve ordering), or tx is stopped. */
	if (!priv->ieee80211->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
	    (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index])) ||
	    (priv->ieee80211->queue_stop)) {
		RT_TRACE(COMP_FIRMWARE,"===================NULL packet==================================> tx full!\n");
		skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
	} else {
		priv->ieee80211->softmac_hard_start_xmit(skb, dev);
	}

	return rtStatus;
}
/*-----------------------------------------------------------------------------
 * Function: cmpk_message_handle_tx()
 *
 * Overview: Driver internal module can call the API to send message to
 * firmware side. For example, you can send a debug command packet.
 * Or you can send a request for FW to modify RLX4181 LBUS HW bank.
 * Otherwise, you can change MAC/PHT/RF register by firmware at
 * run time. We do not support message more than one segment now.
 *
 * Input: NONE
 *
 * Output: NONE
 *
 * Return: true on success, false on skb allocation failure.
 *
 * Revised History:
 * When Who Remark
 * 05/06/2008 amy porting from windows code.
 *
 *---------------------------------------------------------------------------*/
extern rt_status cmpk_message_handle_tx(
	struct net_device *dev,
	u8 *codevirtualaddress,
	u32 packettype,
	u32 buffer_len)
{
	bool rt_status = true;
#ifdef RTL8192U
	/* Command packets are handled elsewhere on RTL8192U. */
	return rt_status;
#else
	struct r8192_priv *priv = ieee80211_priv(dev);
	u16 frag_threshold;
	u16 frag_length, frag_offset = 0;
	rt_firmware *pfirmware = priv->pFirmware;
	struct sk_buff *skb;
	unsigned char *seg_ptr;
	cb_desc *tcb_desc;
	u8 bLastIniPkt;

	firmware_init_param(dev);
	/* Fragmentation might be required: split the buffer into chunks
	 * of at most cmdpacket_frag_thresold bytes. */
	frag_threshold = pfirmware->cmdpacket_frag_thresold;
	do {
		if ((buffer_len - frag_offset) > frag_threshold) {
			frag_length = frag_threshold;
			bLastIniPkt = 0;
		} else {
			frag_length = buffer_len - frag_offset;
			bLastIniPkt = 1;
		}
		/* Allocate skb buffer to contain firmware info and tx descriptor info
		 * add 4 to avoid packet appending overflow.
		 * */
#ifdef RTL8192U
		skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + frag_length + 4);
#else
		skb = dev_alloc_skb(frag_length + 4);
#endif
		/* Fix: allocation was previously dereferenced unchecked. */
		if (!skb) {
			rt_status = false;
			break;
		}
		memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
		tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->queue_index = TXCMD_QUEUE;
		tcb_desc->bCmdOrInit = packettype;
		tcb_desc->bLastIniPkt = bLastIniPkt;
#ifdef RTL8192U
		skb_reserve(skb, USB_HWDESC_HEADER_LEN);
#endif
		/*
		 * Transform from little endian to big endian
		 * and pending zero
		 *
		 * Fix: copy only this fragment (frag_length).  The previous
		 * code put/copied buffer_len bytes into an skb sized for a
		 * single fragment, overflowing the skb tailroom whenever
		 * fragmentation occurred; txbuf_size was wrong as well.
		 */
		seg_ptr = skb_put(skb, frag_length);
		memcpy(seg_ptr, codevirtualaddress, frag_length);
		tcb_desc->txbuf_size = (u16)frag_length;

		if (!priv->ieee80211->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
		    (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index])) ||
		    (priv->ieee80211->queue_stop)) {
			RT_TRACE(COMP_FIRMWARE,"=====================================================> tx full!\n");
			skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
		} else {
			priv->ieee80211->softmac_hard_start_xmit(skb, dev);
		}

		codevirtualaddress += frag_length;
		frag_offset += frag_length;
	} while (frag_offset < buffer_len);

	return rt_status;
#endif
} /* CMPK_Message_Handle_Tx */
/*-----------------------------------------------------------------------------
 * Function: cmpk_counttxstatistic()
 *
 * Overview: Fold one firmware TX-feedback element into the driver's
 * statistics: ok/fail totals, per-cast-type counters, and retry counts.
 *
 * Input: PADAPTER pAdapter - .
 * CMPK_TXFB_T *psTx_FB - .
 *
 * Output: NONE
 *
 * Return: NONE
 *
 * NOTE(review): the ENABLE_PS block below references "pAdapter", which
 * is not a parameter of this function -- it would not compile with
 * ENABLE_PS defined; confirm/repair before enabling that config.
 *
 * Revised History:
 * When Who Remark
 * 05/12/2008 amy Create Version 0 porting from windows code.
 *
 *---------------------------------------------------------------------------*/
static void
cmpk_count_txstatistic(
struct net_device *dev,
cmpk_txfb_t *pstx_fb)
{
struct r8192_priv *priv = ieee80211_priv(dev);
#ifdef ENABLE_PS
RT_RF_POWER_STATE rtState;
pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState));
// When RF is off, we should not count the packet for hw/sw synchronize
// reason, ie. there may be a duration while sw switch is changed and hw
// switch is being changed. 2006.12.04, by shien chang.
if (rtState == eRfOff)
{
return;
}
#endif
#ifdef TODO
if(pAdapter->bInHctTest)
return;
#endif
/* We can not know the packet length and transmit type: broadcast or uni
 or multicast. So the relative statistics must be collected in tx
 feedback info. */
if (pstx_fb->tok)
{
priv->stats.txfeedbackok++;
priv->stats.txoktotal++;
priv->stats.txokbytestotal += pstx_fb->pkt_length;
priv->stats.txokinperiod++;
/* We can not make sure broadcast/multicast or unicast mode. */
if (pstx_fb->pkt_type == PACKET_MULTICAST)
{
priv->stats.txmulticast++;
priv->stats.txbytesmulticast += pstx_fb->pkt_length;
}
else if (pstx_fb->pkt_type == PACKET_BROADCAST)
{
priv->stats.txbroadcast++;
priv->stats.txbytesbroadcast += pstx_fb->pkt_length;
}
else
{
priv->stats.txunicast++;
priv->stats.txbytesunicast += pstx_fb->pkt_length;
}
}
else
{
priv->stats.txfeedbackfail++;
priv->stats.txerrtotal++;
priv->stats.txerrbytestotal += pstx_fb->pkt_length;
/* We can not make sure broadcast/multicast or unicast mode. */
if (pstx_fb->pkt_type == PACKET_MULTICAST)
{
priv->stats.txerrmulticast++;
}
else if (pstx_fb->pkt_type == PACKET_BROADCAST)
{
priv->stats.txerrbroadcast++;
}
else
{
priv->stats.txerrunicast++;
}
}
/* Retry counters accumulate for both success and failure. */
priv->stats.txretrycount += pstx_fb->retry_cnt;
priv->stats.txfeedbackretry += pstx_fb->retry_cnt;
} /* cmpk_CountTxStatistic */
/*-----------------------------------------------------------------------------
 * Function: cmpk_handle_tx_feedback()
 *
 * Overview: The function is responsible for extract the message inside TX
 * feedbck message from firmware. It will contain dedicated info in
 * ws-06-0063-rtl8190-command-packet-specification. Please
 * refer to chapter "TX Feedback Element". We have to read 20 bytes
 * in the command packet.
 *
 * Input: struct net_device * dev
 * u8 * pmsg - Msg Ptr of the command packet.
 *
 * Output: NONE
 *
 * Return: NONE
 *
 * Revised History:
 * When Who Remark
 * 05/08/2008 amy Create Version 0 porting from windows code.
 *
 *---------------------------------------------------------------------------*/
static void
cmpk_handle_tx_feedback(
struct net_device *dev,
u8 * pmsg)
{
struct r8192_priv *priv = ieee80211_priv(dev);
cmpk_txfb_t rx_tx_fb; /* local copy of the feedback element */
priv->stats.txfeedback++;
/* 0. Display received message. */
//cmpk_Display_Message(CMPK_RX_TX_FB_SIZE, pMsg);
/* 1. Extract TX feedback info from RFD to temp structure buffer. */
/* It seems that FW use big endian(MIPS) and DRV use little endian in
 windows OS. So we have to read the content byte by byte or transfer
 endian type before copy the message copy. */
/* 2007/07/05 MH Use pointer to transfer structure memory. */
//memcpy((UINT8 *)&rx_tx_fb, pMsg, sizeof(CMPK_TXFB_T));
memcpy((u8*)&rx_tx_fb, pmsg, sizeof(cmpk_txfb_t));
/* 2. Use tx feedback info to count TX statistics. */
cmpk_count_txstatistic(dev, &rx_tx_fb);
/* 2007/01/17 MH Comment previous method for TX statistic function. */
/* Collect info TX feedback packet to fill TCB. */
/* We can not know the packet length and transmit type: broadcast or uni
 or multicast. */
//CountTxStatistics( pAdapter, &tcb );
} /* cmpk_Handle_Tx_Feedback */
/* Beacon-timer interrupt handler: pick the basic beacon rate for the
 * current band/mode and hand the beacon to the hardware TX path.
 * (87B needs software beaconing for DTM encryption.) */
void
cmdpkt_beacontimerinterrupt_819xusb(
	struct net_device *dev
	)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	u16 beacon_rate;

	if (priv->ieee80211->current_network.mode == IEEE_A ||
	    priv->ieee80211->current_network.mode == IEEE_N_5G ||
	    (priv->ieee80211->current_network.mode == IEEE_N_24G &&
	     !priv->ieee80211->pHTInfo->bCurSuppCCK)) {
		/* 5 GHz, or 2.4 GHz N without CCK support: 6 Mbps beacon. */
		beacon_rate = 60;
		DMESG("send beacon frame tx rate is 6Mbpm\n");
	} else {
		/* CCK-capable 2.4 GHz network: 1 Mbps beacon. */
		beacon_rate = 10;
		DMESG("send beacon frame tx rate is 1Mbpm\n");
	}

	rtl819xusb_beacon_tx(dev, beacon_rate);	/* HW Beacon */
}
/*-----------------------------------------------------------------------------
* Function: cmpk_handle_interrupt_status()
*
* Overview: The function is responsible for extract the message from
* firmware. It will contain dedicated info in
* ws-07-0063-v06-rtl819x-command-packet-specification-070315.doc.
* Please refer to chapter "Interrupt Status Element".
*
* Input: struct net_device *dev,
* u8* pmsg - Message Pointer of the command packet.
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 05/12/2008 amy Add this for rtl8192 porting from windows code.
*
*---------------------------------------------------------------------------*/
/* Handle an "Interrupt Status" element from a firmware RX command packet.
 * In ad-hoc mode it updates the beacon TX statistics and triggers software
 * beaconing on a beacon-timer interrupt. */
static void
cmpk_handle_interrupt_status(
	struct net_device *dev,
	u8* pmsg)
{
	cmpk_intr_sta_t rx_intr_status; /* decoded element */
	struct r8192_priv *priv = ieee80211_priv(dev);

	DMESG("---> cmpk_Handle_Interrupt_Status()\n");

	/* It seems that FW use big endian(MIPS) and DRV use little endian in
	   windows OS. So we have to read the content byte by byte or transfer
	   endian type before copy the message copy. */
	/* Byte 1 carries the element length; reject the element if it does
	 * not match the expected size (structure minus 2 header bytes). */
	rx_intr_status.length = pmsg[1];
	if (rx_intr_status.length != (sizeof(cmpk_intr_sta_t) - 2))
	{
		DMESG("cmpk_Handle_Interrupt_Status: wrong length!\n");
		return;
	}

	// Statistics of beacon for ad-hoc mode.
	if( priv->ieee80211->iw_mode == IW_MODE_ADHOC)
	{
		/* NOTE(review): unaligned 32-bit read from the message body;
		 * the original "maybe need endian transform?" concern still
		 * stands — confirm behaviour on big-endian hosts. */
		rx_intr_status.interrupt_status = *((u32 *)(pmsg + 4));
		DMESG("interrupt status = 0x%x\n", rx_intr_status.interrupt_status);

		/* TxBcnOk/TxBcnErr tell us whether this station is acting as
		 * the IBSS coordinator. */
		if (rx_intr_status.interrupt_status & ISR_TxBcnOk)
		{
			priv->ieee80211->bibsscoordinator = true;
			priv->stats.txbeaconokint++;
		}
		else if (rx_intr_status.interrupt_status & ISR_TxBcnErr)
		{
			priv->ieee80211->bibsscoordinator = false;
			priv->stats.txbeaconerr++;
		}

		/* Beacon-timer interrupt: time to send the next beacon. */
		if (rx_intr_status.interrupt_status & ISR_BcnTimerIntr)
		{
			cmdpkt_beacontimerinterrupt_819xusb(dev);
		}
	}

	// Other informations in interrupt status we need?
	DMESG("<---- cmpk_handle_interrupt_status()\n");
} /* cmpk_handle_interrupt_status */
/*-----------------------------------------------------------------------------
* Function: cmpk_handle_query_config_rx()
*
* Overview: The function is responsible for extract the message from
* firmware. It will contain dedicated info in
* ws-06-0063-rtl8190-command-packet-specification. Please
* refer to chapter "Beacon State Element".
*
* Input: u8 * pmsg - Message Pointer of the command packet.
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 05/12/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
/* Decode a "Query/Config" element from a firmware command packet into a
 * cmpk_query_cfg_t.  The firmware is big endian (MIPS), so the fields are
 * unpacked byte by byte rather than structure-copied.
 *
 * NOTE(review): the decoded structure is a local that is never consumed —
 * presumably a porting leftover; confirm before extending.
 *
 * Bug fix: cfg_action lives in bit 7 of byte 4.  The previous mask
 * (pmsg[4] & 0x80000000) could never be non-zero for a u8, so cfg_action
 * was always decoded as 0; the correct byte-sized mask is 0x80. */
static void
cmpk_handle_query_config_rx(
	struct net_device *dev,
	u8* pmsg)
{
	cmpk_query_cfg_t rx_query_cfg;

	/* pmsg[0]/pmsg[1] carry the element id and length (skipped here). */
	rx_query_cfg.cfg_action = (pmsg[4] & 0x80) >> 7;
	rx_query_cfg.cfg_type = (pmsg[4] & 0x60) >> 5;
	rx_query_cfg.cfg_size = (pmsg[4] & 0x18) >> 3;
	rx_query_cfg.cfg_page = (pmsg[6] & 0x0F) >> 0;
	rx_query_cfg.cfg_offset = pmsg[7];
	/* 32-bit big-endian value and mask fields. */
	rx_query_cfg.value = (pmsg[8] << 24) | (pmsg[9] << 16) |
		(pmsg[10] << 8) | (pmsg[11] << 0);
	rx_query_cfg.mask = (pmsg[12] << 24) | (pmsg[13] << 16) |
		(pmsg[14] << 8) | (pmsg[15] << 0);
} /* cmpk_Handle_Query_Config_Rx */
/*-----------------------------------------------------------------------------
* Function: cmpk_count_tx_status()
*
 * Overview:	Count aggregated tx status from firmware of one type rx command
* packet element id = RX_TX_STATUS.
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 05/12/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
/* Accumulate an aggregated "TX status" report (element id RX_TX_STATUS)
 * from the firmware into the driver-wide TX statistics counters. */
static void cmpk_count_tx_status( struct net_device *dev,
	cmpk_tx_status_t *pstx_status)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

#ifdef ENABLE_PS
	/* NOTE(review): this block references pAdapter and rtState, neither
	 * of which is defined here (only "rtstate" is declared), so it
	 * cannot compile when ENABLE_PS is set — left untouched. */
	RT_RF_POWER_STATE rtstate;
	pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState));
	// When RF is off, we should not count the packet for hw/sw synchronize
	// reason, ie. there may be a duration while sw switch is changed and hw
	// switch is being changed. 2006.12.04, by shien chang.
	if (rtState == eRfOff)
	{
		return;
	}
#endif

	/* Overall ok/fail/retry counters. */
	priv->stats.txfeedbackok += pstx_status->txok;
	priv->stats.txoktotal += pstx_status->txok;
	priv->stats.txfeedbackfail += pstx_status->txfail;
	priv->stats.txerrtotal += pstx_status->txfail;
	priv->stats.txretrycount += pstx_status->txretry;
	priv->stats.txfeedbackretry += pstx_status->txretry;

	/* Per-destination-type packet counters. */
	priv->stats.txmulticast += pstx_status->txmcok;
	priv->stats.txbroadcast += pstx_status->txbcok;
	priv->stats.txunicast += pstx_status->txucok;
	priv->stats.txerrmulticast += pstx_status->txmcfail;
	priv->stats.txerrbroadcast += pstx_status->txbcfail;
	priv->stats.txerrunicast += pstx_status->txucfail;

	/* Per-destination-type byte counters and last used rate. */
	priv->stats.txbytesmulticast += pstx_status->txmclength;
	priv->stats.txbytesbroadcast += pstx_status->txbclength;
	priv->stats.txbytesunicast += pstx_status->txuclength;
	priv->stats.last_packet_rate = pstx_status->rate;
} /* cmpk_CountTxStatus */
/*-----------------------------------------------------------------------------
* Function: cmpk_handle_tx_status()
*
* Overview: Firmware add a new tx feedback status to reduce rx command
* packet buffer operation load.
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 05/12/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
/* Copy the aggregated TX-status element out of the command packet and feed
 * it into the statistics counters. */
static void
cmpk_handle_tx_status(
	struct net_device *dev,
	u8 *pmsg)
{
	cmpk_tx_status_t tx_status;

	memcpy(&tx_status, pmsg, sizeof(tx_status));

	/* Use the tx feedback info to update the TX statistics. */
	cmpk_count_tx_status(dev, &tx_status);
} /* cmpk_Handle_Tx_Status */
/*-----------------------------------------------------------------------------
* Function: cmpk_handle_tx_rate_history()
*
* Overview: Firmware add a new tx rate history
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 05/12/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
/* Handle a "TX rate history" element: endian-adjust the buffer in place
 * and add the per-rate packet counts to the driver statistics.
 *
 * Bug fix: the original code only checked the buffer pointer for NULL
 * *after* the endian-swap loop had already dereferenced it; the check is
 * now performed up front. */
static void
cmpk_handle_tx_rate_history(
	struct net_device *dev,
	u8* pmsg)
{
	cmpk_tx_rahis_t *ptxrate;
	u8 i, j;
	u16 length = sizeof(cmpk_tx_rahis_t);
	u32 *ptemp;
	struct r8192_priv *priv = ieee80211_priv(dev);

#ifdef ENABLE_PS
	/* NOTE(review): references pAdapter/rtState, which are not defined
	 * here — cannot compile when ENABLE_PS is set; left untouched. */
	pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState));
	// When RF is off, we should not count the packet for hw/sw synchronize
	// reason, ie. there may be a duration while sw switch is changed and hw
	// switch is being changed. 2006.12.04, by shien chang.
	if (rtState == eRfOff)
	{
		return;
	}
#endif

	/* Validate before any dereference. */
	if (pmsg == NULL)
		return;

	/* Swap the two 16-bit halves of every 32-bit word to adjust the
	 * firmware's endianness for this host. */
	ptemp = (u32 *)pmsg;
	for (i = 0; i < (length / 4); i++) {
		u16 lo, hi;

		lo = ptemp[i] & 0x0000FFFF;
		hi = ptemp[i] >> 16;
		ptemp[i] = (lo << 16) | hi;
	}

	ptxrate = (cmpk_tx_rahis_t *)pmsg;

	for (i = 0; i < 16; i++) {
		/* Collect CCK rate packet num (4 CCK rates). */
		if (i < 4)
			priv->stats.txrate.cck[i] += ptxrate->cck[i];
		/* Collect OFDM rate packet num (8 OFDM rates). */
		if (i < 8)
			priv->stats.txrate.ofdm[i] += ptxrate->ofdm[i];
		/* HT MCS counters: 4 streams x 16 indices. */
		for (j = 0; j < 4; j++)
			priv->stats.txrate.ht_mcs[j][i] += ptxrate->ht_mcs[j][i];
	}
} /* cmpk_Handle_Tx_Rate_History */
/*-----------------------------------------------------------------------------
* Function: cmpk_message_handle_rx()
*
* Overview: In the function, we will capture different RX command packet
* info. Every RX command packet element has different message
* length and meaning in content. We only support three type of RX
* command packet now. Please refer to document
* ws-06-0063-rtl8190-command-packet-specification.
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
* When Who Remark
* 05/06/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
/* Parse a (possibly aggregated) RX command packet from the firmware and
 * dispatch every element to its handler.  Element layouts are described in
 * ws-06-0063-rtl8190-command-packet-specification.
 *
 * Returns 1 when the buffer was a command packet (even if an unknown
 * element aborted parsing), 0 when @pstats is invalid.
 *
 * Bug fix: the loop condition used "|| exe_cnt++ > 100", which never
 * limited the number of iterations (the counter was only tested once the
 * length was exhausted).  "&& exe_cnt++ < 100" actually bounds the loop,
 * as the original "prevent the lock being held too long" comment
 * intended. */
extern u32
cmpk_message_handle_rx(
	struct net_device *dev,
	struct ieee80211_rx_stats *pstats)
{
	int total_length;
	u8 cmd_length, exe_cnt = 0;
	u8 element_id;
	u8 *pcmd_buff;

	/* 0. Check input argument: bail out on a NULL status pointer. */
	if (pstats == NULL)
		return 0;	/* This is not a command packet. */

	/* 1. Total number of command-packet bytes to parse. */
	total_length = pstats->Length;

	/* 2. Buffer holding the aggregated command elements. */
	pcmd_buff = pstats->virtual_address;

	/* 3./4. Walk all elements: the firmware may aggregate several
	 * commands in one packet, so keep going until the length is
	 * consumed — but never more than 100 elements. */
	while (total_length > 0 && exe_cnt++ < 100) {
		element_id = pcmd_buff[0];
		switch (element_id) {
		case RX_TX_FEEDBACK:
			cmpk_handle_tx_feedback(dev, pcmd_buff);
			cmd_length = CMPK_RX_TX_FB_SIZE;
			break;

		case RX_INTERRUPT_STATUS:
			cmpk_handle_interrupt_status(dev, pcmd_buff);
			cmd_length = sizeof(cmpk_intr_sta_t);
			break;

		case BOTH_QUERY_CONFIG:
			cmpk_handle_query_config_rx(dev, pcmd_buff);
			cmd_length = CMPK_BOTH_QUERY_CONFIG_SIZE;
			break;

		case RX_TX_STATUS:
			cmpk_handle_tx_status(dev, pcmd_buff);
			cmd_length = CMPK_RX_TX_STS_SIZE;
			break;

		case RX_TX_PER_PKT_FEEDBACK:
			/* Keep a case here so CCX per-packet feedback does
			 * not fall into the default (error) branch. */
			cmd_length = CMPK_RX_TX_FB_SIZE;
			break;

		case RX_TX_RATE_HISTORY:
			cmpk_handle_tx_rate_history(dev, pcmd_buff);
			cmd_length = CMPK_TX_RAHIS_SIZE;
			break;

		default:
			RT_TRACE(COMP_ERR, "---->cmpk_message_handle_rx():unknow CMD Element\n");
			return 1;	/* This is a command packet. */
		}

		/* Advance past the element just handled. */
		total_length -= cmd_length;
		pcmd_buff += cmd_length;
	} /* while (total_length > 0) */
	return 1;	/* This is a command packet. */
} /* CMPK_Message_Handle_Rx */
| gpl-2.0 |
MatiasBjorling/linux | drivers/video/cg14.c | 2396 | 15019 | /* cg14.c: CGFOURTEEN frame buffer driver
*
* Copyright (C) 2003, 2006 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz)
* Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
*
* Driver layout based loosely on tgafb.c, see that file for credits.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/fb.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/fbio.h>
#include "sbuslib.h"
/*
* Local functions.
*/
static int cg14_setcolreg(unsigned, unsigned, unsigned, unsigned,
unsigned, struct fb_info *);
static int cg14_mmap(struct fb_info *, struct vm_area_struct *);
static int cg14_ioctl(struct fb_info *, unsigned int, unsigned long);
static int cg14_pan_display(struct fb_var_screeninfo *, struct fb_info *);
/*
* Frame buffer operations
*/
/* fb_ops table installed into fb_info by cg14_probe(); the drawing
 * operations use the generic cfb_* software routines. */
static struct fb_ops cg14_ops = {
	.owner			= THIS_MODULE,
	.fb_setcolreg		= cg14_setcolreg,
	.fb_pan_display		= cg14_pan_display,
	.fb_fillrect		= cfb_fillrect,
	.fb_copyarea		= cfb_copyarea,
	.fb_imageblit		= cfb_imageblit,
	.fb_mmap		= cg14_mmap,
	.fb_ioctl		= cg14_ioctl,
#ifdef CONFIG_COMPAT
	.fb_compat_ioctl	= sbusfb_compat_ioctl,
#endif
};
#define CG14_MCR_INTENABLE_SHIFT 7
#define CG14_MCR_INTENABLE_MASK 0x80
#define CG14_MCR_VIDENABLE_SHIFT 6
#define CG14_MCR_VIDENABLE_MASK 0x40
#define CG14_MCR_PIXMODE_SHIFT 4
#define CG14_MCR_PIXMODE_MASK 0x30
#define CG14_MCR_TMR_SHIFT 2
#define CG14_MCR_TMR_MASK 0x0c
#define CG14_MCR_TMENABLE_SHIFT 1
#define CG14_MCR_TMENABLE_MASK 0x02
#define CG14_MCR_RESET_SHIFT 0
#define CG14_MCR_RESET_MASK 0x01
#define CG14_REV_REVISION_SHIFT 4
#define CG14_REV_REVISION_MASK 0xf0
#define CG14_REV_IMPL_SHIFT 0
#define CG14_REV_IMPL_MASK 0x0f
#define CG14_VBR_FRAMEBASE_SHIFT 12
#define CG14_VBR_FRAMEBASE_MASK 0x00fff000
#define CG14_VMCR1_SETUP_SHIFT 0
#define CG14_VMCR1_SETUP_MASK 0x000001ff
#define CG14_VMCR1_VCONFIG_SHIFT 9
#define CG14_VMCR1_VCONFIG_MASK 0x00000e00
#define CG14_VMCR2_REFRESH_SHIFT 0
#define CG14_VMCR2_REFRESH_MASK 0x00000001
#define CG14_VMCR2_TESTROWCNT_SHIFT 1
#define CG14_VMCR2_TESTROWCNT_MASK 0x00000002
#define CG14_VMCR2_FBCONFIG_SHIFT 2
#define CG14_VMCR2_FBCONFIG_MASK 0x0000000c
#define CG14_VCR_REFRESHREQ_SHIFT 0
#define CG14_VCR_REFRESHREQ_MASK 0x000003ff
#define CG14_VCR1_REFRESHENA_SHIFT 10
#define CG14_VCR1_REFRESHENA_MASK 0x00000400
#define CG14_VCA_CAD_SHIFT 0
#define CG14_VCA_CAD_MASK 0x000003ff
#define CG14_VCA_VERS_SHIFT 10
#define CG14_VCA_VERS_MASK 0x00000c00
#define CG14_VCA_RAMSPEED_SHIFT 12
#define CG14_VCA_RAMSPEED_MASK 0x00001000
#define CG14_VCA_8MB_SHIFT 13
#define CG14_VCA_8MB_MASK 0x00002000
#define CG14_MCR_PIXMODE_8 0
#define CG14_MCR_PIXMODE_16 2
#define CG14_MCR_PIXMODE_32 3
/* CG14 control register block — hardware layout; field order and sizes
 * must not be changed.  Accessed through the sbus_readb()/sbus_writeb()
 * I/O accessors. */
struct cg14_regs{
	u8 mcr;	/* Master Control Reg */
	u8 ppr;	/* Packed Pixel Reg */
	u8 tms[2];	/* Test Mode Status Regs */
	u8 msr;	/* Master Status Reg */
	u8 fsr;	/* Fault Status Reg */
	u8 rev;	/* Revision & Impl */
	u8 ccr;	/* Clock Control Reg */
	u32 tmr;	/* Test Mode Read Back */
	u8 mod;	/* Monitor Operation Data Reg */
	u8 acr;	/* Aux Control */
	u8 xxx0[6];	/* pad */
	u16 hct;	/* Hor Counter */
	u16 vct;	/* Vert Counter */
	u16 hbs;	/* Hor Blank Start */
	u16 hbc;	/* Hor Blank Clear */
	u16 hss;	/* Hor Sync Start */
	u16 hsc;	/* Hor Sync Clear */
	u16 csc;	/* Composite Sync Clear */
	u16 vbs;	/* Vert Blank Start */
	u16 vbc;	/* Vert Blank Clear */
	u16 vss;	/* Vert Sync Start */
	u16 vsc;	/* Vert Sync Clear */
	u16 xcs;
	u16 xcc;
	u16 fsa;	/* Fault Status Address */
	u16 adr;	/* Address Registers */
	u8 xxx1[0xce];	/* pad */
	u8 pcg[0x100];	/* Pixel Clock Generator */
	u32 vbr;	/* Frame Base Row */
	u32 vmcr;	/* VBC Master Control */
	u32 vcr;	/* VBC refresh */
	u32 vca;	/* VBC Config */
};
#define CG14_CCR_ENABLE 0x04
#define CG14_CCR_SELECT 0x02 /* HW/Full screen */
/* Hardware cursor register block — hardware layout, do not reorder. */
struct cg14_cursor {
	u32 cpl0[32];	/* Enable plane 0 */
	u32 cpl1[32];	/* Color selection plane */
	u8 ccr;	/* Cursor Control Reg */
	u8 xxx0[3];	/* pad */
	u16 cursx;	/* Cursor x,y position */
	u16 cursy;	/* Cursor x,y position */
	u32 color0;
	u32 color1;
	u32 xxx1[0x1bc];	/* pad */
	u32 cpl0i[32];	/* Enable plane 0 autoinc */
	u32 cpl1i[32];	/* Color selection autoinc */
};
/* DAC register block — each register is followed by 255 pad bytes so the
 * registers sit on 256-byte boundaries (hardware layout). */
struct cg14_dac {
	u8 addr;	/* Address Register */
	u8 xxx0[255];
	u8 glut;	/* Gamma table */
	u8 xxx1[255];
	u8 select;	/* Register Select */
	u8 xxx2[255];
	u8 mode;	/* Mode Register */
};
/* X (extended) lookup table — hardware layout with plain and
 * auto-increment windows. */
struct cg14_xlut{
	u8 x_xlut[256];
	u8 x_xlutd[256];
	u8 xxx0[0x600];	/* pad */
	u8 x_xlut_inc[256];	/* auto-increment window */
	u8 x_xlutd_inc[256];	/* auto-increment window */
};
/* Color look up table (clut) */
/* Each one of these arrays hold the color lookup table (for 256
* colors) for each MDI page (I assume then there should be 4 MDI
* pages, I still wonder what they are. I have seen NeXTStep split
* the screen in four parts, while operating in 24 bits mode. Each
* integer holds 4 values: alpha value (transparency channel, thanks
* go to John Stone (johns@umr.edu) from OpenBSD), red, green and blue
*
* I currently use the clut instead of the Xlut
*/
/* Color lookup table: 256 packed alpha/red/green/blue entries per window,
 * in plain and auto-increment variants (see the long comment above). */
struct cg14_clut {
	u32 c_clut[256];
	u32 c_clutd[256];	/* i wonder what the 'd' is for */
	u32 c_clut_inc[256];	/* auto-increment window */
	u32 c_clutd_inc[256];	/* auto-increment window */
};
#define CG14_MMAP_ENTRIES 16
/* Per-instance driver state, hung off fb_info->par. */
struct cg14_par {
	spinlock_t		lock;	/* serializes register and CLUT access */
	struct cg14_regs	__iomem *regs;
	struct cg14_clut	__iomem *clut;
	struct cg14_cursor	__iomem *cursor;

	u32			flags;
#define CG14_FLAG_BLANKED	0x00000001

	unsigned long		iospace;	/* IORESOURCE_BITS of the reg resource */

	struct sbus_mmap_map	mmap_map[CG14_MMAP_ENTRIES];	/* per-device copy of __cg14_mmap_map */

	int			mode;	/* current MDI_*_PIX pixel mode */
	int			ramsize;	/* VRAM bytes: 0x400000 or 0x800000 */
};
/* Drop the chip back to the default 8-bit packed-pixel mode by clearing
 * the PIXMODE field of the master control register.  Caller holds
 * par->lock.
 *
 * Bug fix: "&regs" had been corrupted to the mojibake "(R)s" sequence by a
 * broken HTML-entity round trip; restored the address-of operator. */
static void __cg14_reset(struct cg14_par *par)
{
	struct cg14_regs __iomem *regs = par->regs;
	u8 val;

	val = sbus_readb(&regs->mcr);
	val &= ~(CG14_MCR_PIXMODE_MASK);
	sbus_writeb(val, &regs->mcr);
}
/* Panning is not supported; the call is only used to catch switches out
 * of graphics mode, at which point the pixel mode is reset. */
static int cg14_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct cg14_par *par = (struct cg14_par *) info->par;
	unsigned long flags;

	spin_lock_irqsave(&par->lock, flags);
	__cg14_reset(par);
	spin_unlock_irqrestore(&par->lock, flags);

	/* Any non-zero offset or vmode request cannot be honoured. */
	return (var->xoffset || var->yoffset || var->vmode) ? -EINVAL : 0;
}
/**
 * cg14_setcolreg - Optional function. Sets a color register.
 * @regno: index of the palette entry to set (0..255)
 * @red: red component, up to 16 bits wide (only the top 8 bits are used)
 * @green: green component, up to 16 bits wide (only the top 8 bits are used)
 * @blue: blue component, up to 16 bits wide (only the top 8 bits are used)
 * @transp: alpha component — ignored by this hardware
 * @info: frame buffer info structure
 *
 * Packs the 8-bit RGB components into one 32-bit CLUT word and writes it
 * under the parameter lock.  Returns 0 on success, 1 if @regno is out of
 * range.
 */
static int cg14_setcolreg(unsigned regno,
			  unsigned red, unsigned green, unsigned blue,
			  unsigned transp, struct fb_info *info)
{
	struct cg14_par *par = (struct cg14_par *) info->par;
	struct cg14_clut __iomem *clut = par->clut;
	unsigned long flags;
	u32 val;

	if (regno >= 256)
		return 1;

	/* Keep only the high byte of each 16-bit component. */
	red >>= 8;
	green >>= 8;
	blue >>= 8;
	val = (red | (green << 8) | (blue << 16));

	spin_lock_irqsave(&par->lock, flags);
	sbus_writel(val, &clut->c_clut[regno]);
	spin_unlock_irqrestore(&par->lock, flags);

	return 0;
}
/* Map the requested offset into user space via the generic SBUS helper,
 * which consults the per-device translation table. */
static int cg14_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct cg14_par *par = (struct cg14_par *) info->par;

	return sbusfb_mmap_helper(par->mmap_map, info->fix.smem_start,
				  info->fix.smem_len, par->iospace, vma);
}
/* Device-specific ioctls: MDI_RESET (restore 8-bit mode), MDI_GET_CFGINFO
 * (report geometry/mode/RAM size) and MDI_SET_PIXELMODE (switch between
 * 8/16/32-bit packed pixel modes).  Everything else is delegated to the
 * generic SBUS fb ioctl helper.
 *
 * Bug fixes: restored "&regs" where mojibake "(R)s" had corrupted the
 * address-of operator, and removed the stray semicolons after the switch
 * blocks (they were harmless empty statements, but non-idiomatic). */
static int cg14_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
	struct cg14_par *par = (struct cg14_par *) info->par;
	struct cg14_regs __iomem *regs = par->regs;
	struct mdi_cfginfo kmdi, __user *mdii;
	unsigned long flags;
	int cur_mode, mode, ret = 0;

	switch (cmd) {
	case MDI_RESET:
		spin_lock_irqsave(&par->lock, flags);
		__cg14_reset(par);
		spin_unlock_irqrestore(&par->lock, flags);
		break;

	case MDI_GET_CFGINFO:
		memset(&kmdi, 0, sizeof(kmdi));

		spin_lock_irqsave(&par->lock, flags);
		kmdi.mdi_type = FBTYPE_MDICOLOR;
		kmdi.mdi_height = info->var.yres;
		kmdi.mdi_width = info->var.xres;
		kmdi.mdi_mode = par->mode;
		kmdi.mdi_pixfreq = 72; /* FIXME */
		kmdi.mdi_size = par->ramsize;
		spin_unlock_irqrestore(&par->lock, flags);

		mdii = (struct mdi_cfginfo __user *) arg;
		if (copy_to_user(mdii, &kmdi, sizeof(kmdi)))
			ret = -EFAULT;
		break;

	case MDI_SET_PIXELMODE:
		if (get_user(mode, (int __user *) arg)) {
			ret = -EFAULT;
			break;
		}

		spin_lock_irqsave(&par->lock, flags);
		cur_mode = sbus_readb(&regs->mcr);
		cur_mode &= ~CG14_MCR_PIXMODE_MASK;
		switch (mode) {
		case MDI_32_PIX:
			cur_mode |= (CG14_MCR_PIXMODE_32 <<
				     CG14_MCR_PIXMODE_SHIFT);
			break;

		case MDI_16_PIX:
			cur_mode |= (CG14_MCR_PIXMODE_16 <<
				     CG14_MCR_PIXMODE_SHIFT);
			break;

		case MDI_8_PIX:
			/* PIXMODE field cleared above already selects 8-bit. */
			break;

		default:
			ret = -ENOSYS;
			break;
		}
		if (!ret) {
			sbus_writeb(cur_mode, &regs->mcr);
			par->mode = mode;
		}
		spin_unlock_irqrestore(&par->lock, flags);
		break;

	default:
		ret = sbusfb_ioctl_helper(cmd, arg, info,
					  FBTYPE_MDICOLOR, 8,
					  info->fix.smem_len);
		break;
	}

	return ret;
}
/*
* Initialisation
*/
/* Fill in the fixed (immutable) framebuffer parameters from the OF node
 * and the probed line length. */
static void cg14_init_fix(struct fb_info *info, int linebytes,
			  struct device_node *dp)
{
	strlcpy(info->fix.id, dp->name, sizeof(info->fix.id));

	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_PSEUDOCOLOR;

	info->fix.line_length = linebytes;

	info->fix.accel = FB_ACCEL_SUN_CG14;
}
/* Virtual-offset -> physical-offset translation table used by cg14_mmap().
 * Entries with bit 31 set in .poff are rebased onto the register resource
 * in cg14_probe(); the list is terminated by a zero .size entry. */
static struct sbus_mmap_map __cg14_mmap_map[CG14_MMAP_ENTRIES] = {
	{
		.voff	= CG14_REGS,
		.poff	= 0x80000000,
		.size	= 0x1000
	},
	{
		.voff	= CG14_XLUT,
		.poff	= 0x80003000,
		.size	= 0x1000
	},
	{
		.voff	= CG14_CLUT1,
		.poff	= 0x80004000,
		.size	= 0x1000
	},
	{
		.voff	= CG14_CLUT2,
		.poff	= 0x80005000,
		.size	= 0x1000
	},
	{
		.voff	= CG14_CLUT3,
		.poff	= 0x80006000,
		.size	= 0x1000
	},
	{
		.voff	= CG3_MMAP_OFFSET - 0x7000,
		.poff	= 0x80000000,
		.size	= 0x7000
	},
	{
		.voff	= CG3_MMAP_OFFSET,
		.poff	= 0x00000000,
		.size	= SBUS_MMAP_FBSIZE(1)
	},
	{
		.voff	= MDI_CURSOR_MAP,
		.poff	= 0x80001000,
		.size	= 0x1000
	},
	{
		.voff	= MDI_CHUNKY_BGR_MAP,
		.poff	= 0x01000000,
		.size	= 0x400000
	},
	{
		.voff	= MDI_PLANAR_X16_MAP,
		.poff	= 0x02000000,
		.size	= 0x200000
	},
	{
		.voff	= MDI_PLANAR_C16_MAP,
		.poff	= 0x02800000,
		.size	= 0x200000
	},
	{
		.voff	= MDI_PLANAR_X32_MAP,
		.poff	= 0x03000000,
		.size	= 0x100000
	},
	{
		.voff	= MDI_PLANAR_B32_MAP,
		.poff	= 0x03400000,
		.size	= 0x100000
	},
	{
		.voff	= MDI_PLANAR_G32_MAP,
		.poff	= 0x03800000,
		.size	= 0x100000
	},
	{
		.voff	= MDI_PLANAR_R32_MAP,
		.poff	= 0x03c00000,
		.size	= 0x100000
	},
	{ .size = 0 }	/* terminator */
};
/* Undo the of_ioremap()s performed in cg14_probe().  Each mapping is
 * unmapped only if it was successfully established, so this is safe to
 * call from the probe error path where some pointers may be NULL. */
static void cg14_unmap_regs(struct platform_device *op, struct fb_info *info,
			    struct cg14_par *par)
{
	if (par->regs)
		of_iounmap(&op->resource[0],
			   par->regs, sizeof(struct cg14_regs));
	if (par->clut)
		of_iounmap(&op->resource[0],
			   par->clut, sizeof(struct cg14_clut));
	if (par->cursor)
		of_iounmap(&op->resource[0],
			   par->cursor, sizeof(struct cg14_cursor));
	if (info->screen_base)
		of_iounmap(&op->resource[1],
			   info->screen_base, info->fix.smem_len);
}
/* Probe one cgfourteen device: map its register/CLUT/cursor areas and the
 * video RAM, build the per-device mmap translation table, and register
 * the framebuffer. */
static int cg14_probe(struct platform_device *op)
{
	struct device_node *dp = op->dev.of_node;
	struct fb_info *info;
	struct cg14_par *par;
	int is_8mb, linebytes, i, err;

	info = framebuffer_alloc(sizeof(struct cg14_par), &op->dev);

	err = -ENOMEM;
	if (!info)
		goto out_err;
	par = info->par;

	spin_lock_init(&par->lock);

	/* 8-bit pseudocolor default, with 8-bit DAC channels. */
	sbusfb_fill_var(&info->var, dp, 8);
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;

	linebytes = of_getintprop_default(dp, "linebytes",
					  info->var.xres);
	info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);

	/* On SBUS/SBI parents the framebuffer lives in resource 0,
	 * otherwise in resource 1.
	 * NOTE(review): the else branch still takes iospace from
	 * resource[0] — looks deliberate, but confirm against the hardware
	 * documentation before changing. */
	if (!strcmp(dp->parent->name, "sbus") ||
	    !strcmp(dp->parent->name, "sbi")) {
		info->fix.smem_start = op->resource[0].start;
		par->iospace = op->resource[0].flags & IORESOURCE_BITS;
	} else {
		info->fix.smem_start = op->resource[1].start;
		par->iospace = op->resource[0].flags & IORESOURCE_BITS;
	}

	par->regs = of_ioremap(&op->resource[0], 0,
			       sizeof(struct cg14_regs), "cg14 regs");
	par->clut = of_ioremap(&op->resource[0], CG14_CLUT1,
			       sizeof(struct cg14_clut), "cg14 clut");
	par->cursor = of_ioremap(&op->resource[0], CG14_CURSORREGS,
				 sizeof(struct cg14_cursor), "cg14 cursor");

	info->screen_base = of_ioremap(&op->resource[1], 0,
				       info->fix.smem_len, "cg14 ram");

	if (!par->regs || !par->clut || !par->cursor ||
	    !info->screen_base)
		goto out_unmap_regs;	/* err is still -ENOMEM here */

	is_8mb = (((op->resource[1].end - op->resource[1].start) + 1) ==
		  (8 * 1024 * 1024));

	BUILD_BUG_ON(sizeof(par->mmap_map) != sizeof(__cg14_mmap_map));

	memcpy(&par->mmap_map, &__cg14_mmap_map, sizeof(par->mmap_map));

	/* Rebase register windows (.poff bit 31 set) onto the real register
	 * resource, and double the planar map sizes on 8MB boards. */
	for (i = 0; i < CG14_MMAP_ENTRIES; i++) {
		struct sbus_mmap_map *map = &par->mmap_map[i];

		if (!map->size)
			break;
		if (map->poff & 0x80000000)
			map->poff = (map->poff & 0x7fffffff) +
				(op->resource[0].start -
				 op->resource[1].start);

		if (is_8mb &&
		    map->size >= 0x100000 &&
		    map->size <= 0x400000)
			map->size *= 2;
	}

	par->mode = MDI_8_PIX;
	par->ramsize = (is_8mb ? 0x800000 : 0x400000);

	info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
	info->fbops = &cg14_ops;

	__cg14_reset(par);

	if (fb_alloc_cmap(&info->cmap, 256, 0))
		goto out_unmap_regs;

	fb_set_cmap(&info->cmap, info);

	cg14_init_fix(info, linebytes, dp);

	err = register_framebuffer(info);
	if (err < 0)
		goto out_dealloc_cmap;

	dev_set_drvdata(&op->dev, info);

	printk(KERN_INFO "%s: cgfourteen at %lx:%lx, %dMB\n",
	       dp->full_name,
	       par->iospace, info->fix.smem_start,
	       par->ramsize >> 20);

	return 0;

out_dealloc_cmap:
	fb_dealloc_cmap(&info->cmap);

out_unmap_regs:
	cg14_unmap_regs(op, info, par);
	framebuffer_release(info);

out_err:
	return err;
}
/* Teardown counterpart of cg14_probe(): unregister the fbdev and release
 * mappings and allocations in reverse order. */
static int cg14_remove(struct platform_device *op)
{
	struct fb_info *info = dev_get_drvdata(&op->dev);
	struct cg14_par *par = info->par;

	unregister_framebuffer(info);
	fb_dealloc_cmap(&info->cmap);

	cg14_unmap_regs(op, info, par);

	framebuffer_release(info);

	dev_set_drvdata(&op->dev, NULL);

	return 0;
}
/* OF match table: binds this driver to "cgfourteen" device nodes. */
static const struct of_device_id cg14_match[] = {
	{
		.name = "cgfourteen",
	},
	{},
};
MODULE_DEVICE_TABLE(of, cg14_match);
/* Platform driver glue for the cgfourteen device. */
static struct platform_driver cg14_driver = {
	.driver = {
		.name = "cg14",
		.owner = THIS_MODULE,
		.of_match_table = cg14_match,
	},
	.probe		= cg14_probe,
	.remove		= cg14_remove,
};
/* Module entry point: honour a "video=cg14fb:off"-style kernel option and
 * otherwise register the platform driver. */
static int __init cg14_init(void)
{
	if (fb_get_options("cg14fb", NULL))
		return -ENODEV;

	return platform_driver_register(&cg14_driver);
}
/* Module exit point: unregister the platform driver. */
static void __exit cg14_exit(void)
{
	platform_driver_unregister(&cg14_driver);
}
module_init(cg14_init);
module_exit(cg14_exit);
MODULE_DESCRIPTION("framebuffer driver for CGfourteen chipsets");
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
| gpl-2.0 |
TWRP-J5/android_kernel_samsung_j5lte | sound/pci/ice1712/quartet.c | 2396 | 30642 | /*
* ALSA driver for ICEnsemble VT1724 (Envy24HT)
*
* Lowlevel functions for Infrasonic Quartet
*
* Copyright (c) 2009 Pavel Hofman <pavel.hofman@ivitera.com>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/tlv.h>
#include <sound/info.h>
#include "ice1712.h"
#include "envy24ht.h"
#include <sound/ak4113.h>
#include "quartet.h"
/* Card-specific state for the Infrasonic Quartet, holding software copies
 * of the write-only control registers. */
struct qtet_spec {
	struct ak4113 *ak4113;	/* S/PDIF receiver chip handle */
	unsigned int scr;	/* system control register */
	unsigned int mcr;	/* monitoring control register */
	unsigned int cpld;	/* cpld register */
};
/* Per-control binding: the register bit a control toggles, the accessor
 * pair for its register, and the two enum labels shown to userspace. */
struct qtet_kcontrol_private {
	unsigned int bit;	/* register bit this control drives */
	void (*set_register)(struct snd_ice1712 *ice, unsigned int val);
	unsigned int (*get_register)(struct snd_ice1712 *ice);
	unsigned char * const texts[2];	/* off/on labels */
};
/* Indices of the Quartet boolean controls (routing/monitoring switches).
 * NOTE(review): presumably used to index a control-descriptor table later
 * in the file — confirm against the full source. */
enum {
	IN12_SEL = 0,
	IN34_SEL,
	AIN34_SEL,
	COAX_OUT,
	IN12_MON12,
	IN12_MON34,
	IN34_MON12,
	IN34_MON34,
	OUT12_MON34,
	OUT34_MON12,
};
/* User-visible names of the three external clock sources (order matches
 * the EXT_*_TYPE constants defined below). */
static const char * const ext_clock_names[3] = {"IEC958 In", "Word Clock 1xFS",
	"Word Clock 256xFS"};
/* chip address on I2C bus */
#define AK4113_ADDR 0x26 /* S/PDIF receiver */
/* chip address on SPI bus */
#define AK4620_ADDR 0x02 /* ADC/DAC */
/*
* GPIO pins
*/
/* GPIO0 - O - DATA0, def. 0 */
#define GPIO_D0 (1<<0)
/* GPIO1 - I/O - DATA1, Jack Detect Input0 (0:present, 1:missing), def. 1 */
#define GPIO_D1_JACKDTC0 (1<<1)
/* GPIO2 - I/O - DATA2, Jack Detect Input1 (0:present, 1:missing), def. 1 */
#define GPIO_D2_JACKDTC1 (1<<2)
/* GPIO3 - I/O - DATA3, def. 1 */
#define GPIO_D3 (1<<3)
/* GPIO4 - I/O - DATA4, SPI CDTO, def. 1 */
#define GPIO_D4_SPI_CDTO (1<<4)
/* GPIO5 - I/O - DATA5, SPI CCLK, def. 1 */
#define GPIO_D5_SPI_CCLK (1<<5)
/* GPIO6 - I/O - DATA6, Cable Detect Input (0:detected, 1:not detected */
#define GPIO_D6_CD (1<<6)
/* GPIO7 - I/O - DATA7, Device Detect Input (0:detected, 1:not detected */
#define GPIO_D7_DD (1<<7)
/* GPIO8 - O - CPLD Chip Select, def. 1 */
#define GPIO_CPLD_CSN (1<<8)
/* GPIO9 - O - CPLD register read/write (0:write, 1:read), def. 0 */
#define GPIO_CPLD_RW (1<<9)
/* GPIO10 - O - SPI Chip Select for CODEC#0, def. 1 */
#define GPIO_SPI_CSN0 (1<<10)
/* GPIO11 - O - SPI Chip Select for CODEC#1, def. 1 */
#define GPIO_SPI_CSN1 (1<<11)
/* GPIO12 - O - Ex. Register Output Enable (0:enable, 1:disable), def. 1,
* init 0 */
#define GPIO_EX_GPIOE (1<<12)
/* GPIO13 - O - Ex. Register0 Chip Select for System Control Register,
* def. 1 */
#define GPIO_SCR (1<<13)
/* GPIO14 - O - Ex. Register1 Chip Select for Monitor Control Register,
* def. 1 */
#define GPIO_MCR (1<<14)
#define GPIO_SPI_ALL (GPIO_D4_SPI_CDTO | GPIO_D5_SPI_CCLK |\
GPIO_SPI_CSN0 | GPIO_SPI_CSN1)
#define GPIO_DATA_MASK (GPIO_D0 | GPIO_D1_JACKDTC0 | \
GPIO_D2_JACKDTC1 | GPIO_D3 | \
GPIO_D4_SPI_CDTO | GPIO_D5_SPI_CCLK | \
GPIO_D6_CD | GPIO_D7_DD)
/* System Control Register GPIO_SCR data bits */
/* Mic/Line select relay (0:line, 1:mic) */
#define SCR_RELAY GPIO_D0
/* Phantom power drive control (0:5V, 1:48V) */
#define SCR_PHP_V GPIO_D1_JACKDTC0
/* H/W mute control (0:Normal, 1:Mute) */
#define SCR_MUTE GPIO_D2_JACKDTC1
/* Phantom power control (0:Phantom on, 1:off) */
#define SCR_PHP GPIO_D3
/* Analog input 1/2 Source Select */
#define SCR_AIN12_SEL0 GPIO_D4_SPI_CDTO
#define SCR_AIN12_SEL1 GPIO_D5_SPI_CCLK
/* Analog input 3/4 Source Select (0:line, 1:hi-z) */
#define SCR_AIN34_SEL GPIO_D6_CD
/* Codec Power Down (0:power down, 1:normal) */
#define SCR_CODEC_PDN GPIO_D7_DD
#define SCR_AIN12_LINE (0)
#define SCR_AIN12_MIC (SCR_AIN12_SEL0)
#define SCR_AIN12_LOWCUT (SCR_AIN12_SEL1 | SCR_AIN12_SEL0)
/* Monitor Control Register GPIO_MCR data bits */
/* Input 1/2 to Monitor 1/2 (0:off, 1:on) */
#define MCR_IN12_MON12 GPIO_D0
/* Input 1/2 to Monitor 3/4 (0:off, 1:on) */
#define MCR_IN12_MON34 GPIO_D1_JACKDTC0
/* Input 3/4 to Monitor 1/2 (0:off, 1:on) */
#define MCR_IN34_MON12 GPIO_D2_JACKDTC1
/* Input 3/4 to Monitor 3/4 (0:off, 1:on) */
#define MCR_IN34_MON34 GPIO_D3
/* Output to Monitor 1/2 (0:off, 1:on) */
#define MCR_OUT34_MON12 GPIO_D4_SPI_CDTO
/* Output to Monitor 3/4 (0:off, 1:on) */
#define MCR_OUT12_MON34 GPIO_D5_SPI_CCLK
/* CPLD Register DATA bits */
/* Clock Rate Select */
#define CPLD_CKS0 GPIO_D0
#define CPLD_CKS1 GPIO_D1_JACKDTC0
#define CPLD_CKS2 GPIO_D2_JACKDTC1
/* Sync Source Select (0:Internal, 1:External) */
#define CPLD_SYNC_SEL GPIO_D3
/* Word Clock FS Select (0:FS, 1:256FS) */
#define CPLD_WORD_SEL GPIO_D4_SPI_CDTO
/* Coaxial Output Source (IS-Link) (0:SPDIF, 1:I2S) */
#define CPLD_COAX_OUT GPIO_D5_SPI_CCLK
/* Input 1/2 Source Select (0:Analog12, 1:An34) */
#define CPLD_IN12_SEL GPIO_D6_CD
/* Input 3/4 Source Select (0:Analog34, 1:Digital In) */
#define CPLD_IN34_SEL GPIO_D7_DD
/* internal clock (CPLD_SYNC_SEL = 0) options */
#define CPLD_CKS_44100HZ (0)
#define CPLD_CKS_48000HZ (CPLD_CKS0)
#define CPLD_CKS_88200HZ (CPLD_CKS1)
#define CPLD_CKS_96000HZ (CPLD_CKS1 | CPLD_CKS0)
#define CPLD_CKS_176400HZ (CPLD_CKS2)
#define CPLD_CKS_192000HZ (CPLD_CKS2 | CPLD_CKS0)
#define CPLD_CKS_MASK (CPLD_CKS0 | CPLD_CKS1 | CPLD_CKS2)
/* external clock (CPLD_SYNC_SEL = 1) options */
/* external clock - SPDIF */
#define CPLD_EXT_SPDIF (0 | CPLD_SYNC_SEL)
/* external clock - WordClock 1xfs */
#define CPLD_EXT_WORDCLOCK_1FS (CPLD_CKS1 | CPLD_SYNC_SEL)
/* external clock - WordClock 256xfs */
#define CPLD_EXT_WORDCLOCK_256FS (CPLD_CKS1 | CPLD_WORD_SEL |\
CPLD_SYNC_SEL)
#define EXT_SPDIF_TYPE 0
#define EXT_WORDCLOCK_1FS_TYPE 1
#define EXT_WORDCLOCK_256FS_TYPE 2
#define AK4620_DFS0 (1<<0)
#define AK4620_DFS1 (1<<1)
#define AK4620_CKS0 (1<<2)
#define AK4620_CKS1 (1<<3)
/* Clock and Format Control register */
#define AK4620_DFS_REG 0x02
/* Deem and Volume Control register */
#define AK4620_DEEMVOL_REG 0x03
#define AK4620_SMUTE (1<<7)
/*
 * Conversion from int value to its binary form. Used for debugging.
 * Renders the 32 bits of @value into @buffer as four space-separated
 * groups of eight '0'/'1' characters, most significant bit first.
 *
 * The caller must provide a buffer of at least 36 bytes
 * (32 digits + 3 separators + terminating NUL). Returns @buffer.
 */
static char *get_binary(char *buffer, int value)
{
	int bit, pos = 0;

	for (bit = 31; bit >= 0; bit--) {
		/* shift an unsigned constant: 1 << 31 would overflow int */
		buffer[pos++] = (value & (1u << bit)) ? '1' : '0';
		/* separate the four octets for readability */
		if (bit > 0 && bit % 8 == 0)
			buffer[pos++] = ' ';
	}
	buffer[pos] = '\0';
	return buffer;
}
/*
* Initial setup of the conversion array GPIO <-> rate
*/
static unsigned int qtet_rates[] = {
44100, 48000, 88200,
96000, 176400, 192000,
};
static unsigned int cks_vals[] = {
CPLD_CKS_44100HZ, CPLD_CKS_48000HZ, CPLD_CKS_88200HZ,
CPLD_CKS_96000HZ, CPLD_CKS_176400HZ, CPLD_CKS_192000HZ,
};
static struct snd_pcm_hw_constraint_list qtet_rates_info = {
.count = ARRAY_SIZE(qtet_rates),
.list = qtet_rates,
.mask = 0,
};
/* AK4113 register write callback: forwards to the VT1724 I2C bus */
static void qtet_ak4113_write(void *private_data, unsigned char reg,
		unsigned char val)
{
	snd_vt1724_write_i2c((struct snd_ice1712 *)private_data, AK4113_ADDR,
			reg, val);
}

/* AK4113 register read callback: forwards to the VT1724 I2C bus */
static unsigned char qtet_ak4113_read(void *private_data, unsigned char reg)
{
	return snd_vt1724_read_i2c((struct snd_ice1712 *)private_data,
			AK4113_ADDR, reg);
}
/*
* AK4620 section
*/
/*
 * Write @data to register @addr of the AK4620 selected by @chip (0 or 1)
 * by bit-banging the SPI-style interface over GPIOs: chip select is
 * dropped, then a 16-bit frame (address + data) is clocked out MSB
 * first, with 100 us delays between every GPIO transition.  The GPIO
 * direction and write mask are saved/restored around the transfer.
 */
static void qtet_akm_write(struct snd_akm4xxx *ak, int chip,
		unsigned char addr, unsigned char data)
{
	unsigned int tmp, orig_dir;
	int idx;
	unsigned int addrdata;
	struct snd_ice1712 *ice = ak->private_data[0];

	/* NOTE(review): only chip 0/1 have chip-select lines below,
	 * although values up to 3 pass this check -- confirm */
	if (snd_BUG_ON(chip < 0 || chip >= 4))
		return;
	/*printk(KERN_DEBUG "Writing to AK4620: chip=%d, addr=0x%x,
	   data=0x%x\n", chip, addr, data);*/
	orig_dir = ice->gpio.get_dir(ice);
	ice->gpio.set_dir(ice, orig_dir | GPIO_SPI_ALL);
	/* set mask - only SPI bits writable */
	ice->gpio.set_mask(ice, ~GPIO_SPI_ALL);
	tmp = ice->gpio.get_data(ice);
	/* high all */
	tmp |= GPIO_SPI_ALL;
	ice->gpio.set_data(ice, tmp);
	udelay(100);
	/* drop chip select */
	if (chip)
		/* CODEC 1 */
		tmp &= ~GPIO_SPI_CSN1;
	else
		tmp &= ~GPIO_SPI_CSN0;
	ice->gpio.set_data(ice, tmp);
	udelay(100);
	/* build the 16-bit frame: chip address, R/W=1, register, data */
	addrdata = (AK4620_ADDR << 6) | 0x20 | (addr & 0x1f);
	addrdata = (addrdata << 8) | data;
	/* clock the frame out, MSB first */
	for (idx = 15; idx >= 0; idx--) {
		/* drop clock */
		tmp &= ~GPIO_D5_SPI_CCLK;
		ice->gpio.set_data(ice, tmp);
		udelay(100);
		/* set data bit while clock is low */
		if (addrdata & (1 << idx))
			tmp |= GPIO_D4_SPI_CDTO;
		else
			tmp &= ~GPIO_D4_SPI_CDTO;
		ice->gpio.set_data(ice, tmp);
		udelay(100);
		/* raise clock - data is latched on the rising edge */
		tmp |= GPIO_D5_SPI_CCLK;
		ice->gpio.set_data(ice, tmp);
		udelay(100);
	}
	/* all back to 1 - deselects both codecs */
	tmp |= GPIO_SPI_ALL;
	ice->gpio.set_data(ice, tmp);
	udelay(100);
	/* return all gpios to non-writable */
	ice->gpio.set_mask(ice, 0xffffff);
	/* restore GPIOs direction */
	ice->gpio.set_dir(ice, orig_dir);
}
/*
 * Read-modify-write helper: on every chip driven by @ak, replace the
 * bits selected by @mask in register @addr with @value.
 */
static void qtet_akm_set_regs(struct snd_akm4xxx *ak, unsigned char addr,
		unsigned char mask, unsigned char value)
{
	int chip;

	for (chip = 0; chip < ak->num_chips; chip++) {
		unsigned char reg = snd_akm4xxx_get(ak, chip, addr);

		/* clear the masked bits, then merge in the new ones */
		reg = (reg & ~mask) | value;
		snd_akm4xxx_write(ak, chip, addr, reg);
	}
}
/*
 * Change the speed mode (DFS/CKS bits) of the AK4620 codecs to match
 * @rate.  Thresholds follow the rate table: >108000 covers 176.4/192k,
 * >54000 covers 88.2/96k, anything lower is normal speed.
 */
static void qtet_akm_set_rate_val(struct snd_akm4xxx *ak, unsigned int rate)
{
	unsigned char ak4620_dfs;

	if (rate == 0) /* no hint - S/PDIF input is master or the new spdif
			  input rate undetected, simply return */
		return;
	/* adjust DFS on codecs - see datasheet */
	if (rate > 108000)
		ak4620_dfs = AK4620_DFS1 | AK4620_CKS1;
	else if (rate > 54000)
		ak4620_dfs = AK4620_DFS0 | AK4620_CKS0;
	else
		ak4620_dfs = 0;
	/* set new value */
	qtet_akm_set_regs(ak, AK4620_DFS_REG, AK4620_DFS0 | AK4620_DFS1 |
			AK4620_CKS0 | AK4620_CKS1, ak4620_dfs);
}
#define AK_CONTROL(xname, xch) { .name = xname, .num_channels = xch }
#define PCM_12_PLAYBACK_VOLUME "PCM 1/2 Playback Volume"
#define PCM_34_PLAYBACK_VOLUME "PCM 3/4 Playback Volume"
#define PCM_12_CAPTURE_VOLUME "PCM 1/2 Capture Volume"
#define PCM_34_CAPTURE_VOLUME "PCM 3/4 Capture Volume"
static const struct snd_akm4xxx_dac_channel qtet_dac[] = {
AK_CONTROL(PCM_12_PLAYBACK_VOLUME, 2),
AK_CONTROL(PCM_34_PLAYBACK_VOLUME, 2),
};
static const struct snd_akm4xxx_adc_channel qtet_adc[] = {
AK_CONTROL(PCM_12_CAPTURE_VOLUME, 2),
AK_CONTROL(PCM_34_CAPTURE_VOLUME, 2),
};
static struct snd_akm4xxx akm_qtet_dac = {
.type = SND_AK4620,
.num_dacs = 4, /* DAC1 - Output 12
*/
.num_adcs = 4, /* ADC1 - Input 12
*/
.ops = {
.write = qtet_akm_write,
.set_rate_val = qtet_akm_set_rate_val,
},
.dac_info = qtet_dac,
.adc_info = qtet_adc,
};
/* Communication routines with the CPLD */
/* Writes data to external register @reg; both @reg and @data are
 * GPIO bit representations.  The value is latched by pulsing the
 * output-enable and the register-select GPIO low, with 100 us
 * settling delays between transitions. */
static void reg_write(struct snd_ice1712 *ice, unsigned int reg,
		unsigned int data)
{
	unsigned int tmp;

	mutex_lock(&ice->gpio_mutex);
	/* set direction of used GPIOs */
	/* all outputs */
	tmp = 0x00ffff;
	ice->gpio.set_dir(ice, tmp);
	/* mask - writable bits */
	ice->gpio.set_mask(ice, ~(tmp));
	/* write the data */
	tmp = ice->gpio.get_data(ice);
	tmp &= ~GPIO_DATA_MASK;
	tmp |= data;
	ice->gpio.set_data(ice, tmp);
	udelay(100);
	/* drop output enable */
	tmp &= ~GPIO_EX_GPIOE;
	ice->gpio.set_data(ice, tmp);
	udelay(100);
	/* drop the register gpio - latches the data */
	tmp &= ~reg;
	ice->gpio.set_data(ice, tmp);
	udelay(100);
	/* raise the register GPIO */
	tmp |= reg;
	ice->gpio.set_data(ice, tmp);
	udelay(100);
	/* raise all data gpios */
	tmp |= GPIO_DATA_MASK;
	ice->gpio.set_data(ice, tmp);
	/* mask - immutable bits */
	ice->gpio.set_mask(ice, 0xffffff);
	/* outputs only 8-15 */
	ice->gpio.set_dir(ice, 0x00ff00);
	mutex_unlock(&ice->gpio_mutex);
}
/* Return the cached System Control Register value (last value written) */
static unsigned int get_scr(struct snd_ice1712 *ice)
{
	struct qtet_spec *spec = ice->spec;
	return spec->scr;
}

/* Return the cached Monitor Control Register value */
static unsigned int get_mcr(struct snd_ice1712 *ice)
{
	struct qtet_spec *spec = ice->spec;
	return spec->mcr;
}

/* Return the cached CPLD register value */
static unsigned int get_cpld(struct snd_ice1712 *ice)
{
	struct qtet_spec *spec = ice->spec;
	return spec->cpld;
}
/* Write @val to the external SCR register and update the cached copy */
static void set_scr(struct snd_ice1712 *ice, unsigned int val)
{
	struct qtet_spec *spec = ice->spec;
	reg_write(ice, GPIO_SCR, val);
	spec->scr = val;
}

/* Write @val to the external MCR register and update the cached copy */
static void set_mcr(struct snd_ice1712 *ice, unsigned int val)
{
	struct qtet_spec *spec = ice->spec;
	reg_write(ice, GPIO_MCR, val);
	spec->mcr = val;
}

/* Write @val to the CPLD register and update the cached copy */
static void set_cpld(struct snd_ice1712 *ice, unsigned int val)
{
	struct qtet_spec *spec = ice->spec;
	reg_write(ice, GPIO_CPLD_CSN, val);
	spec->cpld = val;
}
#ifdef CONFIG_PROC_FS
/*
 * Proc read callback: dump the cached SCR/MCR/CPLD register values in
 * binary form.  bin_buffer holds 32 digits + 3 spaces + NUL = 36 bytes,
 * matching what get_binary() writes.
 */
static void proc_regs_read(struct snd_info_entry *entry,
		struct snd_info_buffer *buffer)
{
	struct snd_ice1712 *ice = entry->private_data;
	char bin_buffer[36];

	snd_iprintf(buffer, "SCR:       %s\n", get_binary(bin_buffer,
			get_scr(ice)));
	snd_iprintf(buffer, "MCR:       %s\n", get_binary(bin_buffer,
			get_mcr(ice)));
	snd_iprintf(buffer, "CPLD:      %s\n", get_binary(bin_buffer,
			get_cpld(ice)));
}
/* Register the "quartet" proc entry exposing the register dump above */
static void proc_init(struct snd_ice1712 *ice)
{
	struct snd_info_entry *entry;
	if (!snd_card_proc_new(ice->card, "quartet", &entry))
		snd_info_set_text_ops(entry, ice, proc_regs_read);
}
#else /* !CONFIG_PROC_FS */
static void proc_init(struct snd_ice1712 *ice) {}
#endif
/*
 * "Master Playback Switch" get: SCR_MUTE set means hardware mute is
 * engaged, so the switch reports 1 (on) only when the bit is clear.
 */
static int qtet_mute_get(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);

	ucontrol->value.integer.value[0] =
		(get_scr(ice) & SCR_MUTE) ? 0 : 1;
	return 0;
}
/*
 * "Master Playback Switch" put: value 1 = unmute, 0 = mute.
 * Drives both the hardware mute line (SCR_MUTE) and the AK4620 soft
 * mute bit so analog path and codecs mute together.
 * Returns 1 if the state changed, 0 otherwise.
 */
static int qtet_mute_put(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned int old, new, smute;

	old = get_scr(ice) & SCR_MUTE;
	if (ucontrol->value.integer.value[0]) {
		/* unmute */
		new = 0;
		/* un-smuting DAC */
		smute = 0;
	} else {
		/* mute */
		new = SCR_MUTE;
		/* smuting DAC */
		smute = AK4620_SMUTE;
	}
	if (old != new) {
		struct snd_akm4xxx *ak = ice->akm;
		set_scr(ice, (get_scr(ice) & ~SCR_MUTE) | new);
		/* set smute */
		qtet_akm_set_regs(ak, AK4620_DEEMVOL_REG, AK4620_SMUTE, smute);
		return 1;
	}
	/* no change */
	return 0;
}
/*
 * Info callback for the "Analog In 1/2 Capture Switch" enum control:
 * three items (line / mic / mic with low-cut filter).
 */
static int qtet_ain12_enum_info(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_info *uinfo)
{
	static const char * const texts[3] =
		{"Line In 1/2", "Mic", "Mic + Low-cut"};
	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	uinfo->value.enumerated.items = ARRAY_SIZE(texts);
	/* clamp requested item index to the valid range */
	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
		uinfo->value.enumerated.item =
			uinfo->value.enumerated.items - 1;
	strcpy(uinfo->value.enumerated.name,
			texts[uinfo->value.enumerated.item]);
	return 0;
}
/*
 * Get callback for the input 1/2 source enum: map the SCR SEL0/SEL1
 * bit combination to item index 0 (line), 1 (mic) or 2 (mic+low-cut).
 */
static int qtet_ain12_sw_get(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned int val, result;

	val = get_scr(ice) & (SCR_AIN12_SEL1 | SCR_AIN12_SEL0);
	switch (val) {
	case SCR_AIN12_LINE:
		result = 0;
		break;
	case SCR_AIN12_MIC:
		result = 1;
		break;
	case SCR_AIN12_LOWCUT:
		result = 2;
		break;
	default:
		/* BUG - no other combinations allowed */
		snd_BUG();
		result = 0;
	}
	ucontrol->value.integer.value[0] = result;
	return 0;
}
/*
 * Put callback for the input 1/2 source enum.  The item index (0-2) is
 * converted to the SEL0/SEL1 bit pattern (index 2 becomes binary 11
 * since binary 10 is unsupported) and compared against the current
 * state.  The mic relay and the SEL bits are switched in two separate
 * SCR writes; the order differs per target mode (see inline comments).
 * Returns 1 on change, 0 otherwise.
 */
static int qtet_ain12_sw_put(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned int old, new, tmp, masked_old;

	old = new = get_scr(ice);
	masked_old = old & (SCR_AIN12_SEL1 | SCR_AIN12_SEL0);
	tmp = ucontrol->value.integer.value[0];
	if (tmp == 2)
		tmp = 3;	/* binary 10 is not supported */
	tmp <<= 4;	/* shifting to SCR_AIN12_SEL0 */
	if (tmp != masked_old) {
		/* change requested */
		switch (tmp) {
		case SCR_AIN12_LINE:
			/* clear the SEL bits first, then drop the relay */
			new = old & ~(SCR_AIN12_SEL1 | SCR_AIN12_SEL0);
			set_scr(ice, new);
			/* turn off relay */
			new &= ~SCR_RELAY;
			set_scr(ice, new);
			break;
		case SCR_AIN12_MIC:
			/* turn on relay */
			new = old | SCR_RELAY;
			set_scr(ice, new);
			new = (new & ~SCR_AIN12_SEL1) | SCR_AIN12_SEL0;
			set_scr(ice, new);
			break;
		case SCR_AIN12_LOWCUT:
			/* turn on relay */
			new = old | SCR_RELAY;
			set_scr(ice, new);
			new |= SCR_AIN12_SEL1 | SCR_AIN12_SEL0;
			set_scr(ice, new);
			break;
		default:
			snd_BUG();
		}
		return 1;
	}
	/* no change */
	return 0;
}
/*
 * "Phantom Power" get.  Phantom state is derived from the voltage
 * select bit: 48V (SCR_PHP_V set) means phantom power is on.
 */
static int qtet_php_get(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned int val;

	/* if phantom voltage =48V, phantom on */
	val = get_scr(ice) & SCR_PHP_V;
	ucontrol->value.integer.value[0] = val ? 1 : 0;
	return 0;
}
/*
 * "Phantom Power" put.  SCR_PHP is active-low (0 = phantom on), while
 * SCR_PHP_V selects the voltage (1 = 48V).  When enabling, the voltage
 * is raised before the phantom line is asserted; when disabling, the
 * voltage is dropped first.  Returns 1 on change, 0 otherwise.
 */
static int qtet_php_put(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned int old, new;

	old = new = get_scr(ice);
	if (ucontrol->value.integer.value[0] /* phantom on requested */
			&& (~old & SCR_PHP_V)) /* 0 = voltage 5V */ {
		/* is off, turn on */
		/* turn voltage on first, = 1 */
		new = old | SCR_PHP_V;
		set_scr(ice, new);
		/* turn phantom on, = 0 */
		new &= ~SCR_PHP;
		set_scr(ice, new);
	} else if (!ucontrol->value.integer.value[0] && (old & SCR_PHP_V)) {
		/* phantom off requested and 1 = voltage 48V */
		/* is on, turn off */
		/* turn voltage off first, = 0 */
		new = old & ~SCR_PHP_V;
		set_scr(ice, new);
		/* turn phantom off, = 1 */
		new |= SCR_PHP;
		set_scr(ice, new);
	}
	if (old != new)
		return 1;
	/* no change */
	return 0;
}
#define PRIV_SW(xid, xbit, xreg) [xid] = {.bit = xbit,\
.set_register = set_##xreg,\
.get_register = get_##xreg, }
#define PRIV_ENUM2(xid, xbit, xreg, xtext1, xtext2) [xid] = {.bit = xbit,\
.set_register = set_##xreg,\
.get_register = get_##xreg,\
.texts = {xtext1, xtext2} }
static struct qtet_kcontrol_private qtet_privates[] = {
PRIV_ENUM2(IN12_SEL, CPLD_IN12_SEL, cpld, "An In 1/2", "An In 3/4"),
PRIV_ENUM2(IN34_SEL, CPLD_IN34_SEL, cpld, "An In 3/4", "IEC958 In"),
PRIV_ENUM2(AIN34_SEL, SCR_AIN34_SEL, scr, "Line In 3/4", "Hi-Z"),
PRIV_ENUM2(COAX_OUT, CPLD_COAX_OUT, cpld, "IEC958", "I2S"),
PRIV_SW(IN12_MON12, MCR_IN12_MON12, mcr),
PRIV_SW(IN12_MON34, MCR_IN12_MON34, mcr),
PRIV_SW(IN34_MON12, MCR_IN34_MON12, mcr),
PRIV_SW(IN34_MON34, MCR_IN34_MON34, mcr),
PRIV_SW(OUT12_MON34, MCR_OUT12_MON34, mcr),
PRIV_SW(OUT34_MON12, MCR_OUT34_MON12, mcr),
};
/*
 * Generic info callback for the two-item enum controls declared via
 * PRIV_ENUM2: item texts come from the qtet_privates[] entry selected
 * by the control's private_value.
 */
static int qtet_enum_info(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_info *uinfo)
{
	struct qtet_kcontrol_private private =
		qtet_privates[kcontrol->private_value];
	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	uinfo->value.enumerated.items = ARRAY_SIZE(private.texts);
	/* clamp requested item index to the valid range */
	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
		uinfo->value.enumerated.item =
			uinfo->value.enumerated.items - 1;
	strcpy(uinfo->value.enumerated.name,
			private.texts[uinfo->value.enumerated.item]);
	return 0;
}
/*
 * Generic get callback: report whether the control's register bit
 * (from qtet_privates[private_value]) is currently set.
 */
static int qtet_sw_get(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct qtet_kcontrol_private private =
		qtet_privates[kcontrol->private_value];
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	ucontrol->value.integer.value[0] =
		(private.get_register(ice) & private.bit) ? 1 : 0;
	return 0;
}
static int qtet_sw_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct qtet_kcontrol_private private =
qtet_privates[kcontrol->private_value];
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
unsigned int old, new;
old = private.get_register(ice);
if (ucontrol->value.integer.value[0])
new = old | private.bit;
else
new = old & ~private.bit;
if (old != new) {
private.set_register(ice, new);
return 1;
}
/* no change */
return 0;
}
#define qtet_sw_info snd_ctl_boolean_mono_info
#define QTET_CONTROL(xname, xtype, xpriv) \
{.iface = SNDRV_CTL_ELEM_IFACE_MIXER,\
.name = xname,\
.info = qtet_##xtype##_info,\
.get = qtet_sw_get,\
.put = qtet_sw_put,\
.private_value = xpriv }
static struct snd_kcontrol_new qtet_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
.info = qtet_sw_info,
.get = qtet_mute_get,
.put = qtet_mute_put,
.private_value = 0
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Phantom Power",
.info = qtet_sw_info,
.get = qtet_php_get,
.put = qtet_php_put,
.private_value = 0
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Analog In 1/2 Capture Switch",
.info = qtet_ain12_enum_info,
.get = qtet_ain12_sw_get,
.put = qtet_ain12_sw_put,
.private_value = 0
},
QTET_CONTROL("Analog In 3/4 Capture Switch", enum, AIN34_SEL),
QTET_CONTROL("PCM In 1/2 Capture Switch", enum, IN12_SEL),
QTET_CONTROL("PCM In 3/4 Capture Switch", enum, IN34_SEL),
QTET_CONTROL("Coax Output Source", enum, COAX_OUT),
QTET_CONTROL("Analog In 1/2 to Monitor 1/2", sw, IN12_MON12),
QTET_CONTROL("Analog In 1/2 to Monitor 3/4", sw, IN12_MON34),
QTET_CONTROL("Analog In 3/4 to Monitor 1/2", sw, IN34_MON12),
QTET_CONTROL("Analog In 3/4 to Monitor 3/4", sw, IN34_MON34),
QTET_CONTROL("Output 1/2 to Monitor 3/4", sw, OUT12_MON34),
QTET_CONTROL("Output 3/4 to Monitor 1/2", sw, OUT34_MON12),
};
static char *slave_vols[] = {
PCM_12_PLAYBACK_VOLUME,
PCM_34_PLAYBACK_VOLUME,
NULL
};
static
DECLARE_TLV_DB_SCALE(qtet_master_db_scale, -6350, 50, 1);
/*
 * Look up a mixer control element by @name on @card; returns NULL if
 * no such element exists.
 */
static struct snd_kcontrol *ctl_find(struct snd_card *card,
		const char *name)
{
	struct snd_ctl_elem_id sid;
	memset(&sid, 0, sizeof(sid));
	/* FIXME: strcpy is bad. */
	strcpy(sid.name, name);
	sid.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
	return snd_ctl_find_id(card, &sid);
}
static void add_slaves(struct snd_card *card,
struct snd_kcontrol *master, char * const *list)
{
for (; *list; list++) {
struct snd_kcontrol *slave = ctl_find(card, *list);
if (slave)
snd_ctl_add_slave(master, slave);
}
}
/*
 * Build all mixer controls: the AK4xxx codec controls, the Quartet
 * specific controls, a virtual master volume with the PCM playback
 * volumes as slaves, and the AK4113 S/PDIF capture controls.
 * Returns 0 on success or a negative error code.
 */
static int qtet_add_controls(struct snd_ice1712 *ice)
{
	struct qtet_spec *spec = ice->spec;
	int err, i;
	struct snd_kcontrol *vmaster;

	err = snd_ice1712_akm4xxx_build_controls(ice);
	if (err < 0)
		return err;
	for (i = 0; i < ARRAY_SIZE(qtet_controls); i++) {
		err = snd_ctl_add(ice->card,
				snd_ctl_new1(&qtet_controls[i], ice));
		if (err < 0)
			return err;
	}
	/* Create virtual master control */
	vmaster = snd_ctl_make_virtual_master("Master Playback Volume",
			qtet_master_db_scale);
	if (!vmaster)
		return -ENOMEM;
	add_slaves(ice->card, vmaster, slave_vols);
	err = snd_ctl_add(ice->card, vmaster);
	if (err < 0)
		return err;
	/* only capture SPDIF over AK4113 */
	err = snd_ak4113_build(spec->ak4113,
			ice->pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream);
	if (err < 0)
		return err;
	return 0;
}
/* True when the CPLD is slaved to an external clock (S/PDIF master) */
static inline int qtet_is_spdif_master(struct snd_ice1712 *ice)
{
	/* CPLD_SYNC_SEL: 0 = internal, 1 = external (i.e. spdif master) */
	return (get_cpld(ice) & CPLD_SYNC_SEL) ? 1 : 0;
}
/*
 * Translate the cached CPLD clock-select bits back into a sample rate
 * in Hz via the cks_vals[]/qtet_rates[] tables; returns 0 when the bit
 * combination is unknown.
 */
static unsigned int qtet_get_rate(struct snd_ice1712 *ice)
{
	int i;
	unsigned char result;

	result = get_cpld(ice) & CPLD_CKS_MASK;
	for (i = 0; i < ARRAY_SIZE(cks_vals); i++)
		if (cks_vals[i] == result)
			return qtet_rates[i];
	return 0;
}
/*
 * Inverse of qtet_get_rate(): map a sample rate in Hz to the CPLD
 * clock-select bit pattern; returns 0 for an unknown rate.
 */
static int get_cks_val(int rate)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(qtet_rates); idx++) {
		if (qtet_rates[idx] == rate)
			return cks_vals[idx];
	}
	/* rate not in the table */
	return 0;
}
/*
 * Setting a new internal rate: keep the VT1724 slaved to the external
 * clock circuitry, program the CPLD clock-select bits for @rate and
 * switch the CPLD back to its internal clock source.
 */
static void qtet_set_rate(struct snd_ice1712 *ice, unsigned int rate)
{
	unsigned int new;
	unsigned char val;

	/* switching ice1724 to external clock - supplied by ext. circuits */
	val = inb(ICEMT1724(ice, RATE));
	outb(val | VT1724_SPDIF_MASTER, ICEMT1724(ice, RATE));
	new = (get_cpld(ice) & ~CPLD_CKS_MASK) | get_cks_val(rate);
	/* switch to internal clock, drop CPLD_SYNC_SEL */
	new &= ~CPLD_SYNC_SEL;
	/* printk(KERN_DEBUG "QT - set_rate: old %x, new %x\n",
	   get_cpld(ice), new); */
	set_cpld(ice, new);
}
/* Master clock is driven by the external circuitry; nothing to do */
static inline unsigned char qtet_set_mclk(struct snd_ice1712 *ice,
		unsigned int rate)
{
	/* no change in master clock */
	return 0;
}
/*
 * Switch the CPLD to an external clock source.  @type selects S/PDIF
 * or word clock (1x or 256x FS); returns 1 if the CPLD value changed,
 * 0 otherwise.
 */
static int qtet_set_spdif_clock(struct snd_ice1712 *ice, int type)
{
	unsigned int old, new;

	old = new = get_cpld(ice);
	new &= ~(CPLD_CKS_MASK | CPLD_WORD_SEL);
	switch (type) {
	case EXT_SPDIF_TYPE:
		new |= CPLD_EXT_SPDIF;
		break;
	case EXT_WORDCLOCK_1FS_TYPE:
		new |= CPLD_EXT_WORDCLOCK_1FS;
		break;
	case EXT_WORDCLOCK_256FS_TYPE:
		new |= CPLD_EXT_WORDCLOCK_256FS;
		break;
	default:
		snd_BUG();
	}
	if (old != new) {
		set_cpld(ice, new);
		/* changed */
		return 1;
	}
	return 0;
}
/*
 * Identify which external clock source the CPLD is currently set to.
 * Returns one of the EXT_*_TYPE constants, or -1 when the CPLD uses
 * its internal clock (not an external type at all).  An unrecognized
 * external bit combination triggers snd_BUG() and reports type 0.
 */
static int qtet_get_spdif_master_type(struct snd_ice1712 *ice)
{
	unsigned int val;
	int result;

	val = get_cpld(ice);
	/* checking only rate/clock-related bits */
	val &= (CPLD_CKS_MASK | CPLD_WORD_SEL | CPLD_SYNC_SEL);
	if (!(val & CPLD_SYNC_SEL)) {
		/* switched to internal clock, is not any external type */
		result = -1;
	} else {
		switch (val) {
		case (CPLD_EXT_SPDIF):
			result = EXT_SPDIF_TYPE;
			break;
		case (CPLD_EXT_WORDCLOCK_1FS):
			result = EXT_WORDCLOCK_1FS_TYPE;
			break;
		case (CPLD_EXT_WORDCLOCK_256FS):
			result = EXT_WORDCLOCK_256FS_TYPE;
			break;
		default:
			/* undefined combination of external clock setup */
			snd_BUG();
			result = 0;
		}
	}
	return result;
}
/*
 * Called when the AK4113 detects a change in the input S/PDIF stream.
 * In external-S/PDIF master mode the codecs are retuned to the newly
 * detected rate; @c1 is nonzero when the rate-related status changed.
 */
static void qtet_ak4113_change(struct ak4113 *ak4113, unsigned char c0,
		unsigned char c1)
{
	struct snd_ice1712 *ice = ak4113->change_callback_private;
	int rate;

	if ((qtet_get_spdif_master_type(ice) == EXT_SPDIF_TYPE) &&
			c1) {
		/* only for SPDIF master mode, rate was changed */
		rate = snd_ak4113_external_rate(ak4113);
		/* printk(KERN_DEBUG "ak4113 - input rate changed to %d\n",
		   rate); */
		qtet_akm_set_rate_val(ice->akm, rate);
	}
}
/*
 * If clock slaved to SPDIF-IN, pin the runtime rate range to the
 * detected external rate so the stream can only run at that rate.
 */
static void qtet_spdif_in_open(struct snd_ice1712 *ice,
		struct snd_pcm_substream *substream)
{
	struct qtet_spec *spec = ice->spec;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int rate;

	if (qtet_get_spdif_master_type(ice) != EXT_SPDIF_TYPE)
		/* not external SPDIF, no rate limitation */
		return;
	/* only external SPDIF can detect incoming sample rate */
	rate = snd_ak4113_external_rate(spec->ak4113);
	/* narrow min and max to the detected rate, if in range */
	if (rate >= runtime->hw.rate_min && rate <= runtime->hw.rate_max) {
		runtime->hw.rate_min = rate;
		runtime->hw.rate_max = rate;
	}
}
/*
 * Initialize the Quartet: allocate the card-private qtet_spec, hook up
 * the clock/rate callbacks, bring up the external SCR/MCR/CPLD
 * registers, initialize the AK4620 codecs and the AK4113 S/PDIF
 * receiver, register the proc entry and set the initial rate.
 */
static int qtet_init(struct snd_ice1712 *ice)
{
	/* initial AK4113 register values, indexed by register number */
	static const unsigned char ak4113_init_vals[] = {
		/* AK4113_REG_PWRDN */	AK4113_RST | AK4113_PWN |
			AK4113_OCKS0 | AK4113_OCKS1,
		/* AK4113_REQ_FORMAT */	AK4113_DIF_I24I2S | AK4113_VTX |
			AK4113_DEM_OFF | AK4113_DEAU,
		/* AK4113_REG_IO0 */	AK4113_OPS2 | AK4113_TXE |
			AK4113_XTL_24_576M,
		/* AK4113_REG_IO1 */	AK4113_EFH_1024LRCLK | AK4113_IPS(0),
		/* AK4113_REG_INT0_MASK */	0,
		/* AK4113_REG_INT1_MASK */	0,
		/* AK4113_REG_DATDTS */	0,
	};
	int err;
	struct qtet_spec *spec;
	struct snd_akm4xxx *ak;
	unsigned char val;

	/* switching ice1724 to external clock - supplied by ext. circuits */
	val = inb(ICEMT1724(ice, RATE));
	outb(val | VT1724_SPDIF_MASTER, ICEMT1724(ice, RATE));
	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;
	/* qtet is clocked by Xilinx array */
	ice->hw_rates = &qtet_rates_info;
	ice->is_spdif_master = qtet_is_spdif_master;
	ice->get_rate = qtet_get_rate;
	ice->set_rate = qtet_set_rate;
	ice->set_mclk = qtet_set_mclk;
	ice->set_spdif_clock = qtet_set_spdif_clock;
	ice->get_spdif_master_type = qtet_get_spdif_master_type;
	ice->ext_clock_names = ext_clock_names;
	ice->ext_clock_count = ARRAY_SIZE(ext_clock_names);
	/* since Qtet can detect correct SPDIF-in rate, all streams can be
	 * limited to this specific rate */
	ice->spdif.ops.open = ice->pro_open = qtet_spdif_in_open;
	/* spec is owned by ice from here on; NOTE(review): presumably
	 * freed by the ice1712 card teardown on later errors - confirm */
	ice->spec = spec;
	/* Mute Off */
	/* SCR Initialize */
	/* keep codec power down first */
	set_scr(ice, SCR_PHP);
	udelay(1);
	/* codec power up */
	set_scr(ice, SCR_PHP | SCR_CODEC_PDN);
	/* MCR Initialize */
	set_mcr(ice, 0);
	/* CPLD Initialize */
	set_cpld(ice, 0);
	ice->num_total_dacs = 2;
	ice->num_total_adcs = 2;
	ice->akm = kcalloc(2, sizeof(struct snd_akm4xxx), GFP_KERNEL);
	ak = ice->akm;
	if (!ak)
		return -ENOMEM;
	/* only one codec with two chips */
	ice->akm_codecs = 1;
	err = snd_ice1712_akm4xxx_init(ak, &akm_qtet_dac, NULL, ice);
	if (err < 0)
		return err;
	err = snd_ak4113_create(ice->card,
			qtet_ak4113_read,
			qtet_ak4113_write,
			ak4113_init_vals,
			ice, &spec->ak4113);
	if (err < 0)
		return err;
	/* callback for codecs rate setting */
	spec->ak4113->change_callback = qtet_ak4113_change;
	spec->ak4113->change_callback_private = ice;
	/* AK4113 in Quartet can detect external rate correctly
	 * (i.e. check_flags = 0) */
	spec->ak4113->check_flags = 0;
	proc_init(ice);
	qtet_set_rate(ice, 44100);
	return 0;
}
static unsigned char qtet_eeprom[] = {
[ICE_EEP2_SYSCONF] = 0x28, /* clock 256(24MHz), mpu401, 1xADC,
1xDACs, SPDIF in */
[ICE_EEP2_ACLINK] = 0x80, /* I2S */
[ICE_EEP2_I2S] = 0x78, /* 96k, 24bit, 192k */
[ICE_EEP2_SPDIF] = 0xc3, /* out-en, out-int, in, out-ext */
[ICE_EEP2_GPIO_DIR] = 0x00, /* 0-7 inputs, switched to output
only during output operations */
[ICE_EEP2_GPIO_DIR1] = 0xff, /* 8-15 outputs */
[ICE_EEP2_GPIO_DIR2] = 0x00,
[ICE_EEP2_GPIO_MASK] = 0xff, /* changed only for OUT operations */
[ICE_EEP2_GPIO_MASK1] = 0x00,
[ICE_EEP2_GPIO_MASK2] = 0xff,
[ICE_EEP2_GPIO_STATE] = 0x00, /* inputs */
[ICE_EEP2_GPIO_STATE1] = 0x7d, /* all 1, but GPIO_CPLD_RW
and GPIO15 always zero */
[ICE_EEP2_GPIO_STATE2] = 0x00, /* inputs */
};
/* entry point */
struct snd_ice1712_card_info snd_vt1724_qtet_cards[] = {
{
.subvendor = VT1724_SUBDEVICE_QTET,
.name = "Infrasonic Quartet",
.model = "quartet",
.chip_init = qtet_init,
.build_controls = qtet_add_controls,
.eeprom_size = sizeof(qtet_eeprom),
.eeprom_data = qtet_eeprom,
},
{ } /* terminator */
};
| gpl-2.0 |
assusdan/cyanogenmod_kernel_prestigio_muzed3 | drivers/input/joystick/joydump.c | 2652 | 4210 | /*
* Copyright (c) 1996-2001 Vojtech Pavlik
*/
/*
* This is just a very simple driver that can dump the data
* out of the joystick port into the syslog ...
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
#include <linux/gameport.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#define DRIVER_DESC "Gameport data dumper module"
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
#define BUF_SIZE 256
struct joydump {
unsigned int time;
unsigned char data;
};
/*
 * Probe callback: open the gameport (raw preferred, cooked fallback),
 * then trigger it and sample for up to 10 ms, recording up to BUF_SIZE
 * data transitions with timestamps, and finally dump the transitions
 * to the syslog.  Returns 0 once a port could be opened.
 */
static int joydump_connect(struct gameport *gameport, struct gameport_driver *drv)
{
	struct joydump *buf;	/* all entries */
	struct joydump *dump, *prev;	/* one entry each */
	int axes[4], buttons;
	int i, j, t, timeout;
	unsigned long flags;
	unsigned char u;

	printk(KERN_INFO "joydump: ,------------------ START ----------------.\n");
	printk(KERN_INFO "joydump: | Dumping: %30s |\n", gameport->phys);
	printk(KERN_INFO "joydump: | Speed: %28d kHz |\n", gameport->speed);
	if (gameport_open(gameport, drv, GAMEPORT_MODE_RAW)) {
		printk(KERN_INFO "joydump: | Raw mode not available - trying cooked. |\n");
		if (gameport_open(gameport, drv, GAMEPORT_MODE_COOKED)) {
			printk(KERN_INFO "joydump: | Cooked not available either. Failing. |\n");
			printk(KERN_INFO "joydump: `------------------- END -----------------'\n");
			return -ENODEV;
		}
		/* cooked mode: print decoded axes/buttons once */
		gameport_cooked_read(gameport, axes, &buttons);
		for (i = 0; i < 4; i++)
			printk(KERN_INFO "joydump: | Axis %d: %4d. |\n", i, axes[i]);
		printk(KERN_INFO "joydump: | Buttons %02x. |\n", buttons);
		printk(KERN_INFO "joydump: `------------------- END -----------------'\n");
		/* NOTE(review): execution falls through to the raw sampling
		 * loop below even in cooked mode - confirm intended */
	}
	timeout = gameport_time(gameport, 10000); /* 10 ms */
	buf = kmalloc(BUF_SIZE * sizeof(struct joydump), GFP_KERNEL);
	if (!buf) {
		printk(KERN_INFO "joydump: no memory for testing\n");
		goto jd_end;
	}
	dump = buf;
	t = 0;
	i = 1;
	/* sample with interrupts off so timing is as tight as possible */
	local_irq_save(flags);
	u = gameport_read(gameport);
	/* record the initial port state at t = 0 */
	dump->data = u;
	dump->time = t;
	dump++;
	gameport_trigger(gameport);
	/* record each change of the port byte until buffer or time runs out */
	while (i < BUF_SIZE && t < timeout) {
		dump->data = gameport_read(gameport);
		if (dump->data ^ u) {
			u = dump->data;
			dump->time = t;
			i++;
			dump++;
		}
		t++;
	}
	local_irq_restore(flags);
	/*
	 * Dump data.
	 */
	t = i;
	dump = buf;
	prev = dump;
	printk(KERN_INFO "joydump: >------------------ DATA -----------------<\n");
	printk(KERN_INFO "joydump: | index: %3d delta: %3d us data: ", 0, 0);
	for (j = 7; j >= 0; j--)
		printk("%d", (dump->data >> j) & 1);
	printk(" |\n");
	dump++;
	/* remaining entries: print time delta to the previous transition */
	for (i = 1; i < t; i++, dump++, prev++) {
		printk(KERN_INFO "joydump: | index: %3d delta: %3d us data: ",
			i, dump->time - prev->time);
		for (j = 7; j >= 0; j--)
			printk("%d", (dump->data >> j) & 1);
		printk(" |\n");
	}
	kfree(buf);
jd_end:
	printk(KERN_INFO "joydump: `------------------- END -----------------'\n");
	return 0;
}
/* Disconnect callback: release the gameport opened in joydump_connect() */
static void joydump_disconnect(struct gameport *gameport)
{
	gameport_close(gameport);
}
static struct gameport_driver joydump_drv = {
.driver = {
.name = "joydump",
},
.description = DRIVER_DESC,
.connect = joydump_connect,
.disconnect = joydump_disconnect,
};
module_gameport_driver(joydump_drv);
| gpl-2.0 |
Cpasjuste/kernel_amazon_hdx-common | drivers/infiniband/ulp/srp/ib_srp.c | 2908 | 66329 | /*
* Copyright (c) 2005 Cisco Systems. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#define pr_fmt(fmt) PFX fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/atomic.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>
#include "ib_srp.h"
#define DRV_NAME "ib_srp"
#define PFX DRV_NAME ": "
#define DRV_VERSION "0.2"
#define DRV_RELDATE "November 1, 2005"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
"v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static int topspin_workarounds = 1;
module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
"Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
"Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
"Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
"Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
static struct scsi_transport_template *ib_srp_transport_template;
static struct ib_client srp_client = {
.name = "srp",
.add = srp_add_one,
.remove = srp_remove_one
};
static struct ib_sa_client srp_sa_client;
/* The srp_target_port is stored in the SCSI host's hostdata area */
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

/* SCSI host template .info callback: human-readable target name */
static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}
/*
 * Detect Topspin/Cisco targets by matching the OUI embedded in the I/O
 * controller GUID.  Always false when topspin_workarounds is disabled.
 */
static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	if (!topspin_workarounds)
		return 0;

	return !memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
	       !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui);
}
/*
 * Allocate an information unit: the srp_iu descriptor, a zeroed data
 * buffer of @size bytes, and a DMA mapping of that buffer.  Returns NULL
 * on any failure, with all partial allocations released.
 */
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct ib_device *ibdev = host->srp_dev->dev;
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		return NULL;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf) {
		kfree(iu);
		return NULL;
	}

	iu->dma = ib_dma_map_single(ibdev, iu->buf, size, direction);
	if (ib_dma_mapping_error(ibdev, iu->dma)) {
		kfree(iu->buf);
		kfree(iu);
		return NULL;
	}

	iu->size = size;
	iu->direction = direction;
	return iu;
}
static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
if (!iu)
return;
ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
iu->direction);
kfree(iu->buf);
kfree(iu);
}
/* QP asynchronous event handler: only logs the event type at debug level. */
static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}
static int srp_init_qp(struct srp_target_port *target,
struct ib_qp *qp)
{
struct ib_qp_attr *attr;
int ret;
attr = kmalloc(sizeof *attr, GFP_KERNEL);
if (!attr)
return -ENOMEM;
ret = ib_find_pkey(target->srp_host->srp_dev->dev,
target->srp_host->port,
be16_to_cpu(target->path.pkey),
&attr->pkey_index);
if (ret)
goto out;
attr->qp_state = IB_QPS_INIT;
attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_WRITE);
attr->port_num = target->srp_host->port;
ret = ib_modify_qp(qp, attr,
IB_QP_STATE |
IB_QP_PKEY_INDEX |
IB_QP_ACCESS_FLAGS |
IB_QP_PORT);
out:
kfree(attr);
return ret;
}
/*
 * Create a fresh IB CM ID for the target and swap it in, destroying the
 * previous one (if any).  Returns 0 or a negative errno.
 */
static int srp_new_cm_id(struct srp_target_port *target)
{
	struct ib_cm_id *cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
						 srp_cm_handler, target);

	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	if (target->cm_id)
		ib_destroy_cm_id(target->cm_id);
	target->cm_id = cm_id;

	return 0;
}
/*
 * Create the receive CQ, send CQ and RC QP for a target port and move the
 * QP to the INIT state.  Returns 0 or a negative errno; on failure every
 * object created so far is destroyed again (err_* unwind chain).
 */
static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
				       srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
	if (IS_ERR(target->recv_cq)) {
		ret = PTR_ERR(target->recv_cq);
		goto err;
	}

	target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
				       srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
	if (IS_ERR(target->send_cq)) {
		ret = PTR_ERR(target->send_cq);
		goto err_recv_cq;
	}

	/* Arm the receive CQ; the send CQ is polled on demand instead. */
	ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler = srp_qp_event;
	init_attr->cap.max_send_wr = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = IB_QPT_RC;
	init_attr->send_cq = target->send_cq;
	init_attr->recv_cq = target->recv_cq;

	target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err_qp;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(target->qp);

err_send_cq:
	ib_destroy_cq(target->send_cq);

err_recv_cq:
	ib_destroy_cq(target->recv_cq);

err:
	kfree(init_attr);
	return ret;
}
/* Destroy the target's QP and CQs, then release every rx/tx ring IU. */
static void srp_free_target_ib(struct srp_target_port *target)
{
	struct srp_host *host = target->srp_host;
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->send_cq);
	ib_destroy_cq(target->recv_cq);

	for (i = 0; i < SRP_RQ_SIZE; i++)
		srp_free_iu(host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE; i++)
		srp_free_iu(host, target->tx_ring[i]);
}
/*
 * ib_sa_path_rec_get() completion callback: record the outcome on the
 * target and wake up the waiter in srp_lookup_path().
 */
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (!status)
		target->path = *pathrec;
	else
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	complete(&target->done);
}
static int srp_lookup_path(struct srp_target_port *target)
{
target->path.numb_path = 1;
init_completion(&target->done);
target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
target->srp_host->srp_dev->dev,
target->srp_host->port,
&target->path,
IB_SA_PATH_REC_SERVICE_ID |
IB_SA_PATH_REC_DGID |
IB_SA_PATH_REC_SGID |
IB_SA_PATH_REC_NUMB_PATH |
IB_SA_PATH_REC_PKEY,
SRP_PATH_REC_TIMEOUT_MS,
GFP_KERNEL,
srp_path_rec_completion,
target, &target->path_query);
if (target->path_query_id < 0)
return target->path_query_id;
wait_for_completion(&target->done);
if (target->status < 0)
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Path record query failed\n");
return target->status;
}
/*
 * Build and send the IB CM REQ that carries the SRP_LOGIN_REQ as private
 * data.  The combined allocation keeps the login request alive while the
 * CM copies it.  Returns 0 or a negative errno from ib_send_cm_req().
 */
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path = &target->path;
	req->param.alternate_path = NULL;
	req->param.service_id = target->service_id;
	req->param.qp_num = target->qp->qp_num;
	req->param.qp_type = target->qp->qp_type;
	req->param.private_data = &req->priv;
	req->param.private_data_len = sizeof req->priv;
	req->param.flow_control = 1;

	/* Random 24-bit starting PSN, as the low three bytes of 4 random bytes. */
	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout = 20;
	req->param.retry_count = 7;
	req->param.rnr_retry_count = 7;
	req->param.max_cm_retries = 15;

	req->priv.opcode = SRP_LOGIN_REQ;
	req->priv.tag = 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					    SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id, &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}
/*
 * Tear down the CM connection by sending a DREQ, then wait for the CM
 * handler to complete target->done.  If the DREQ cannot even be posted we
 * log and return without waiting.
 */
static void srp_disconnect_target(struct srp_target_port *target)
{
	int ret;

	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	ret = ib_send_cm_dreq(target->cm_id, NULL, 0);
	if (ret) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM DREQ failed\n");
		return;
	}
	wait_for_completion(&target->done);
}
/*
 * Atomically transition target->state from @old to @new under the target
 * lock.  Returns true on success, false if the state was not @old.
 */
static bool srp_change_state(struct srp_target_port *target,
			     enum srp_target_state old,
			     enum srp_target_state new)
{
	bool changed;

	spin_lock_irq(&target->lock);
	changed = (target->state == old);
	if (changed)
		target->state = new;
	spin_unlock_irq(&target->lock);

	return changed;
}
/*
 * Release the per-request resources of every slot in the request ring:
 * the FMR list, the map-page array, the indirect descriptor buffer and
 * its DMA mapping (when one was established).
 */
static void srp_free_req_data(struct srp_target_port *target)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	int i;

	for (i = 0; i < SRP_CMD_SQ_SIZE; i++) {
		struct srp_request *req = &target->req_ring[i];

		kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr)
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		kfree(req->indirect_desc);
	}
}
/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr = shost->hostt->shost_attrs;

	while (attr && *attr) {
		device_remove_file(&shost->shost_dev, *attr);
		attr++;
	}
}
/*
 * Deferred removal of a dead target port.  Runs in workqueue context so
 * that scsi_remove_host() is not called from the SCSI EH thread.  Only
 * proceeds when the state is still SRP_TARGET_DEAD; unlink from the host's
 * target list, detach from the SCSI midlayer, then free the IB resources.
 */
static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, work);

	if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
		return;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	srp_free_req_data(target);
	/* Drop the reference that kept the Scsi_Host alive until now. */
	scsi_host_put(target->scsi_host);
}
/*
 * Resolve a path to the target and log in, retrying as directed by the
 * REJ handling in the CM callback: re-resolve on a port redirect, resend
 * on a DLID redirect, and allocate a new CM ID (up to 3 times) when the
 * previous connection went stale.  Returns 0 on success or a negative
 * errno/status.
 */
static int srp_connect_target(struct srp_target_port *target)
{
	int retries = 3;
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		/* Completed by the CM event handler once a REP/REJ arrives. */
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			/* Our current CM id was stale, and is now in timewait.
			 * Try to reconnect with a new one.
			 */
			if (!retries-- || srp_new_cm_id(target)) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "giving up on stale connection\n");
				target->status = -ECONNRESET;
				return target->status;
			}

			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "retrying stale connection\n");
			break;

		default:
			return target->status;
		}
	}
}
/*
 * Undo srp_map_data(): unmap every pool FMR used by the request, then
 * DMA-unmap the scatterlist.  A no-op for commands without data or with
 * a direction other than to/from the device.
 *
 * NOTE(review): the `while (req->nfmr--)` loop leaves nfmr at -1 after it
 * finishes; this presumably relies on callers never invoking this twice
 * for the same mapped request without an intervening srp_map_data() that
 * resets nfmr — TODO confirm against the call sites.
 */
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct ib_pool_fmr **pfmr;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	pfmr = req->fmr_list;
	while (req->nfmr--)
		ib_fmr_pool_unmap(*pfmr++);

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}
/*
 * Finish a request: unmap its data, credit @req_lim_delta to the flow
 * control counter and put the slot back on the free list.
 */
static void srp_remove_req(struct srp_target_port *target,
			   struct srp_request *req, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(req->scmnd, target, req);

	spin_lock_irqsave(&target->lock, flags);
	req->scmnd = NULL;
	target->req_lim += req_lim_delta;
	list_add_tail(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(&target->lock, flags);
}
static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
req->scmnd->result = DID_RESET << 16;
req->scmnd->scsi_done(req->scmnd);
srp_remove_req(target, req, 0);
}
/*
 * Full connection recovery: disconnect, get a new CM ID, reset and
 * re-initialize the QP, drain both CQs, fail all outstanding commands
 * with DID_RESET, rebuild the free-tx list and reconnect.  Called from
 * SCSI EH context; on unrecoverable failure the target is marked DEAD and
 * its removal is deferred to a workqueue (see comment below).
 */
static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_qp_attr qp_attr;
	struct ib_wc wc;
	int i, ret;

	/* Only one reconnect may be in flight; LIVE -> CONNECTING gates it. */
	if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
		return -EAGAIN;

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	ret = srp_new_cm_id(target);
	if (ret)
		goto err;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	/* Drain any stale completions left over from the old connection. */
	while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
		; /* nothing */
	while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
		; /* nothing */

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		if (req->scmnd)
			srp_reset_req(target, req);
	}

	/* All sends have completed or been flushed; rebuild the tx free list. */
	INIT_LIST_HEAD(&target->free_tx);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		list_add(&target->tx_ring[i]->list, &target->free_tx);

	target->qp_in_error = 0;
	ret = srp_connect_target(target);
	if (ret)
		goto err;

	if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
		ret = -EAGAIN;

	return ret;

err:
	shost_printk(KERN_ERR, target->scsi_host,
		     PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we
	 * are in the context of the SCSI error handler now, which
	 * will deadlock if we call scsi_remove_host().
	 *
	 * Schedule our work inside the lock to avoid a race with
	 * the flush_scheduled_work() in srp_remove_one().
	 */
	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work);
		queue_work(ib_wq, &target->work);
	}
	spin_unlock_irq(&target->lock);

	return ret;
}
/*
 * Append one direct (va/key/len) descriptor to the mapping state and
 * update the running totals.
 */
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc++;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->ndesc++;
	state->total_len += dma_len;
}
/*
 * Close out the pages accumulated in @state: a single page becomes a
 * direct descriptor using the target's rkey; multiple pages are mapped
 * through the FMR pool.  Resets npages/fmr_len afterwards.  Returns 0 or
 * the ib_fmr_pool_map_phys() error.
 */
static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (state->npages == 0)
		return 0;

	if (state->npages == 1) {
		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
			     target->rkey);
	} else {
		fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
					   state->npages, io_addr);
		if (IS_ERR(fmr))
			return PTR_ERR(fmr);

		*state->next_fmr++ = fmr;
		state->nfmr++;

		srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
	}

	state->npages = 0;
	state->fmr_len = 0;
	return 0;
}
/*
 * Record where mapping would have to restart (sg entry, its index and the
 * address within it) if a later FMR mapping attempt fails.
 */
static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_addr = dma_addr;
	state->unmapped_index = sg_index;
	state->unmapped_sg = sg;
}
/*
 * Fold one scatterlist entry into the mapping state.  In SRP_MAP_NO_FMR
 * mode each entry simply becomes a direct descriptor; otherwise the entry
 * is split into FMR-page-sized pieces and merged into the pending FMR,
 * which is flushed whenever it fills up or page alignment is broken.
 * Returns 0 or an error from srp_map_finish_fmr().
 */
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_target_port *target,
			    struct scatterlist *sg, int sg_index,
			    int use_fmr)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (use_fmr == SRP_MAP_NO_FMR) {
		/* Once we're in direct map mode for a request, we don't
		 * go back to FMR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/* If we start at an offset into the FMR page, don't merge into
	 * the current FMR. Finish it out, and use the kernel's MR for this
	 * sg entry. This is to avoid potential bugs on some SRP targets
	 * that were never quite defined, but went away when the initiator
	 * avoided using FMR on such page fragments.
	 */
	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
		ret = srp_map_finish_fmr(state, target);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/* If this is the first sg to go into the FMR, save our position.
	 * We need to know the first unmapped entry, its index, and the
	 * first unmapped address within that entry to be able to restart
	 * mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		/* A full FMR must be flushed before more pages can be added. */
		if (state->npages == SRP_FMR_SIZE) {
			ret = srp_map_finish_fmr(state, target);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->fmr_page_size);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr;
		state->fmr_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/* If the last entry of the FMR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundries.
	 */
	ret = 0;
	if (len != dev->fmr_page_size) {
		ret = srp_map_finish_fmr(state, target);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}
/*
 * Map a SCSI command's data buffer into the SRP command IU.  A single
 * (possibly coalesced) S/G entry produces a direct descriptor; otherwise
 * an indirect descriptor table is built, merging entries via the FMR pool
 * where possible and falling back to direct mapping ("backtrack") when an
 * FMR mapping fails.  Returns the resulting IU length in bytes, or a
 * negative errno.
 */
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat, *sg;
	struct srp_cmd *cmd = req->cmd->buf;
	int i, len, nents, count, use_fmr;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	/* No data, nothing to map: the IU is just the bare SRP_CMD. */
	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nfmr = 0;
		goto map_complete;
	}

	/* We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries with FMR as we
	 * can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc	= req->indirect_desc;
	state.pages	= req->map_page;
	state.next_fmr	= req->fmr_list;

	use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
			/* FMR mapping failed, so backtrack to the first
			 * unmapped entry and continue on without using FMR.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state.unmapped_sg;
			i = state.unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state.unmapped_addr - dma_addr);
			dma_addr = state.unmapped_addr;
			use_fmr = SRP_MAP_NO_FMR;
			srp_map_desc(&state, dma_addr, dma_len, target->rkey);
		}
	}

	/* Flush any pages still pending in the FMR accumulator. */
	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
		goto backtrack;

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	req->nfmr = state.nfmr;
	if (state.ndesc == 1) {
		/* FMR mapping was able to collapse this to one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	/* The buffer-format nibble goes high for data-out, low for data-in. */
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}
/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	/* Responses never consumed a credit, so only non-RSP IUs return one. */
	if (iu_type != SRP_IU_RSP)
		++target->req_lim;
	list_add(&iu->list, &target->free_tx);
	spin_unlock_irqrestore(&target->lock, flags);
}
/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
				      enum srp_iu_type iu_type)
{
	/* Commands must leave SRP_TSK_MGMT_SQ_SIZE credits reserved for TMFs. */
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	/* Reap finished sends so their IUs are back on free_tx. */
	srp_send_completion(target->send_cq, target);

	if (list_empty(&target->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (target->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--target->req_lim;
	}

	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}
static int srp_post_send(struct srp_target_port *target,
struct srp_iu *iu, int len)
{
struct ib_sge list;
struct ib_send_wr wr, *bad_wr;
list.addr = iu->dma;
list.length = len;
list.lkey = target->lkey;
wr.next = NULL;
wr.wr_id = (uintptr_t) iu;
wr.sg_list = &list;
wr.num_sge = 1;
wr.opcode = IB_WR_SEND;
wr.send_flags = IB_SEND_SIGNALED;
return ib_post_send(target->qp, &wr, &bad_wr);
}
static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
struct ib_recv_wr wr, *bad_wr;
struct ib_sge list;
list.addr = iu->dma;
list.length = iu->size;
list.lkey = target->lkey;
wr.next = NULL;
wr.wr_id = (uintptr_t) iu;
wr.sg_list = &list;
wr.num_sge = 1;
return ib_post_recv(target->qp, &wr, &bad_wr);
}
/*
 * Handle an SRP_RSP IU.  A task-management response completes
 * target->tsk_mgmt_done; a command response completes the matching SCSI
 * command, copying sense data and residual counts where flagged.
 *
 * Bug fix: the original code printed an error when req->scmnd was NULL
 * (e.g. a response for a request that was already reset) but then fell
 * through and dereferenced the NULL pointer.  We now credit the req_lim
 * delta and return instead.
 */
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&target->lock, flags);
		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&target->lock, flags);

		target->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			target->tsk_mgmt_status = rsp->data[3];
		complete(&target->tsk_mgmt_done);
	} else {
		req = &target->req_ring[rsp->tag];
		scmnd = req->scmnd;
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %016llx\n",
				     (unsigned long long) rsp->tag);
			/*
			 * Do not touch the (nonexistent) command; just
			 * account for the credits the target granted us.
			 */
			spin_lock_irqsave(&target->lock, flags);
			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&target->lock, flags);
			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

		srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}
/*
 * Send a target-request response (CRED_RSP/AER_RSP) of @len bytes after
 * crediting @req_delta to the flow-control counter.  Returns 0 on
 * success, 1 if no IU was available, or the srp_post_send() error.
 */
static int srp_response_common(struct srp_target_port *target, s32 req_delta,
			       void *rsp, int len)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_delta;
	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
	spin_unlock_irqrestore(&target->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(target, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(target, iu, SRP_IU_RSP);
	}

	return err;
}
/* Answer an SRP_CRED_REQ from the target with a matching SRP_CRED_RSP. */
static void srp_process_cred_req(struct srp_target_port *target,
				 struct srp_cred_req *req)
{
	s32 delta = be32_to_cpu(req->req_lim_delta);
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}
/*
 * Acknowledge an SRP_AER_REQ (asynchronous event report) with an
 * SRP_AER_RSP; the event itself is only logged, not acted upon.
 */
static void srp_process_aer_req(struct srp_target_port *target,
				struct srp_aer_req *req)
{
	s32 delta = be32_to_cpu(req->req_lim_delta);
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}
/*
 * Dispatch one received IU by its SRP opcode, then repost the IU as a new
 * receive.  The DMA syncs bracket CPU access to the receive buffer.
 */
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	/* Compiled-out debug dump of every received IU; flip to 1 to enable. */
	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(target, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(target, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}
/*
 * Receive CQ callback: re-arm the CQ, then drain and handle every
 * available completion.  An error status marks the QP as broken and stops
 * processing.
 */
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (!wc.status) {
			srp_handle_recv(target, &wc);
			continue;
		}
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "failed receive status %d\n",
			     wc.status);
		target->qp_in_error = 1;
		break;
	}
}
/*
 * Drain the send CQ and put each completed IU back on free_tx; an error
 * status flags the QP as broken and stops processing.
 *
 * NOTE(review): free_tx is modified here without taking target->lock.
 * This presumably relies on the caller (__srp_get_tx_iu already holds the
 * lock) or on single-threaded CQ processing — verify against all call
 * sites before changing the locking.
 */
static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	struct srp_iu *iu;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed send status %d\n",
				     wc.status);
			target->qp_in_error = 1;
			break;
		}

		iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
		list_add(&iu->list, &target->free_tx);
	}
}
/*
 * scsi_host_template .queuecommand: build an SRP_CMD IU for @scmnd, map
 * its data and post the send.  Returns 0 when the command was accepted
 * (or completed immediately with DID_BAD_TARGET for dead targets) and
 * SCSI_MLQUEUE_HOST_BUSY when no IU/credit is available or posting fails.
 */
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	int len;

	/* While reconnecting, push commands back to the midlayer queue. */
	if (target->state == SRP_TARGET_CONNECTING)
		goto err;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		scmnd->scsi_done(scmnd);
		return 0;
	}

	spin_lock_irqsave(&target->lock, flags);
	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
	if (!iu)
		goto err_unlock;

	req = list_first_entry(&target->free_reqs, struct srp_request, list);
	list_del(&req->list);
	spin_unlock_irqrestore(&target->lock, flags);

	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->result        = 0;
	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	/* The ring index doubles as the SRP tag used to match the response. */
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd    = scmnd;
	req->cmd      = iu;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data\n");
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(target, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err_iu:
	srp_put_tx_iu(target, iu, SRP_IU_CMD);

	spin_lock_irqsave(&target->lock, flags);
	list_add(&req->list, &target->free_reqs);

err_unlock:
	spin_unlock_irqrestore(&target->lock, flags);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
}
/*
 * Allocate the receive and transmit IU rings.  TX IUs also go on the
 * free_tx list.  On any failure everything allocated so far is freed and
 * the ring slots are cleared; returns 0 or -ENOMEM.
 */
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	struct srp_host *host = target->srp_host;
	int i;

	for (i = 0; i < SRP_RQ_SIZE; i++) {
		target->rx_ring[i] = srp_alloc_iu(host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE; i++) {
		target->tx_ring[i] = srp_alloc_iu(host,
						  target->max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;

		list_add(&target->tx_ring[i]->list, &target->free_tx);
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; i++) {
		srp_free_iu(host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE; i++) {
		srp_free_iu(host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}
/*
 * Handle the CM REP carrying the SRP_LOGIN_RSP: record the target's IU
 * size and credit grant, allocate the IU rings on first connect, move the
 * QP through RTR (posting all receives in between) to RTS, and finally
 * send the RTU.  The outcome is stored in target->status for the waiter
 * in srp_connect_target().
 */
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       struct srp_login_rsp *lrsp,
			       struct srp_target_port *target)
{
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		target->req_lim       = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	/* First connect only: the rings survive reconnects. */
	if (!target->rx_ring[0]) {
		ret = srp_alloc_iu_bufs(target);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	/* Receives must be posted before the QP starts delivering them. */
	for (i = 0; i < SRP_RQ_SIZE; i++) {
		struct srp_iu *iu = target->rx_ring[i];
		ret = srp_post_recv(target, iu);
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	target->status = ret;
}
/*
 * Handle an IB CM REJ (connection rejected): translate the reject reason
 * into target->status.  Redirect reasons become SRP_DLID_REDIRECT /
 * SRP_PORT_REDIRECT so the connect path can retry with the new path,
 * a stale connection becomes SRP_STALE_CONN, everything else -ECONNRESET.
 */
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		/* Update the path from the redirect info in the ARI field. */
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		/* The target may carry an SRP login-reject IU in the
		 * REJ private data; decode its reason code if so. */
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
		target->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}
/*
 * Main IB CM event callback for a target port.  Stores the result of
 * connect/disconnect-related events in target->status and, when 'comp'
 * is set, completes target->done to wake the thread waiting on the
 * event.  Always returns 0 (event consumed).
 */
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	int comp = 0;	/* non-zero: a waiter on target->done must be woken */

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		/* Sets target->status itself. */
		srp_cm_rep_handler(cm_id, event->private_data, target);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;
		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		/* Remote side initiated a disconnect; acknowledge it. */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;
		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		/* Nothing to do for these. */
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	return 0;
}
/*
 * Build and post an SRP task-management IU (e.g. abort task, LUN reset)
 * and wait up to SRP_ABORT_TIMEOUT_MS for the target's response, which
 * completes target->tsk_mgmt_done and fills in target->tsk_mgmt_status.
 * Returns 0 on success, -1 on any failure or timeout.
 */
static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     u64 req_tag, unsigned int lun, u8 func)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	/* Don't touch the hardware once the port is gone. */
	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -1;

	init_completion(&target->tsk_mgmt_done);

	/* Grab a TX IU from the free list under the target lock. */
	spin_lock_irq(&target->lock);
	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&target->lock);

	if (!iu)
		return -1;

	/* Give the CPU ownership of the buffer while we fill it in. */
	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
	/* LUN goes into the most significant bytes of the big-endian field. */
	tsk_mgmt->lun 		= cpu_to_be64((u64) lun << 48);
	/* Tag bit distinguishes TM responses from normal command responses. */
	tsk_mgmt->tag 		= req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag 	= req_tag;

	/* Hand the buffer back to the device before posting the send. */
	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
		/* Posting failed: return the IU to the free list. */
		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
		return -1;
	}

	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}
/*
 * SCSI error-handling abort callback: send SRP_TSK_ABORT_TASK for the
 * command's request slot.  Returns SUCCESS when the abort went through
 * (or the command already completed), FAILED otherwise.
 */
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	int ret = SUCCESS;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	/* No request context or a dead QP: nothing we can abort. */
	if (!req || target->qp_in_error)
		return FAILED;
	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK))
		return FAILED;

	/* req->scmnd cleared means the command completed meanwhile. */
	if (req->scmnd) {
		if (!target->tsk_mgmt_status) {
			srp_remove_req(target, req, 0);
			scmnd->result = DID_ABORT << 16;
		} else
			ret = FAILED;
	}

	return ret;
}
/*
 * SCSI error-handling device-reset callback: send SRP_TSK_LUN_RESET and,
 * on success, fail back every outstanding request for this device.
 */
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int i;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	if (target->qp_in_error)
		return FAILED;
	/* SRP_TAG_NO_REQ: the reset is not tied to a particular request. */
	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (target->tsk_mgmt_status)
		return FAILED;

	/* Complete all requests queued to the device that was reset. */
	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		if (req->scmnd && req->scmnd->device == scmnd->device)
			srp_reset_req(target, req);
	}

	return SUCCESS;
}
/*
 * SCSI error-handling host-reset callback: tear down and re-establish
 * the connection to the target port.
 */
static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	return srp_reconnect_target(target) ? FAILED : SUCCESS;
}
/* sysfs "id_ext": the target's SRP identifier extension, in hex. */
static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct srp_target_port *target = host_to_target(shost);

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}
/* sysfs "ioc_guid": the target's I/O controller GUID, in hex. */
static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct srp_target_port *target = host_to_target(shost);

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}
/* sysfs "service_id": the SRP service ID used to connect, in hex. */
static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct srp_target_port *target = host_to_target(shost);

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}
/* sysfs "pkey": the partition key of the path to the target, in hex. */
static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct srp_target_port *target = host_to_target(shost);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}
/* sysfs "dgid": the (possibly redirected) destination GID in use. */
static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct srp_target_port *target = host_to_target(shost);

	return sprintf(buf, "%pI6\n", target->path.dgid.raw);
}
/* sysfs "orig_dgid": the destination GID originally configured. */
static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct srp_target_port *target = host_to_target(shost);

	return sprintf(buf, "%pI6\n", target->orig_dgid);
}
/* sysfs "req_lim": current SRP request-limit credit count. */
static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct srp_target_port *target = host_to_target(shost);

	return sprintf(buf, "%d\n", target->req_lim);
}
/* sysfs "zero_req_lim": counter kept in target->zero_req_lim. */
static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct srp_target_port *target = host_to_target(shost);

	return sprintf(buf, "%d\n", target->zero_req_lim);
}
/* sysfs "local_ib_port": local HCA port number used by this target. */
static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct srp_target_port *target = host_to_target(shost);

	return sprintf(buf, "%d\n", target->srp_host->port);
}
/* sysfs "local_ib_device": name of the local HCA device. */
static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct srp_target_port *target = host_to_target(shost);

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}
/* sysfs "cmd_sg_entries": scatter/gather entries per SRP command IU. */
static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct srp_target_port *target = host_to_target(shost);

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}
/* sysfs "allow_ext_sg": whether external indirect descriptors are allowed. */
static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct srp_target_port *target = host_to_target(shost);
	const char *state = target->allow_ext_sg ? "true" : "false";

	return sprintf(buf, "%s\n", state);
}
/* Read-only sysfs attributes exported for each SRP SCSI host. */
static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);

/* NULL-terminated table handed to the SCSI midlayer via shost_attrs. */
static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};
/* SCSI host template describing the SRP initiator to the midlayer. */
static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};
/*
 * Register a freshly connected target port with the SCSI midlayer and
 * the SRP transport class, add it to the host's target list, mark it
 * live and kick off a LUN scan.  Returns 0 or a negative errno.
 */
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	sprintf(target->target_name, "SRP.T10:%016llX",
		 (unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	/* Transport-class port id: id_ext followed by ioc_guid. */
	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}
/*
 * Device release callback for srp_class devices.  It only signals that
 * the sysfs entry is gone; the srp_host itself is kfree'd by the caller
 * of device_unregister() (see srp_remove_one()).
 */
static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.dev_release = srp_release_dev
};
/*
* Target ports are added by writing
*
* id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
* pkey=<P_Key>,service_id=<service ID>
*
* to the add_target sysfs attribute.
*/
/* Bit flags recording which add_target options have been parsed. */
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	/* Options that are mandatory in every add_target request. */
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};
/* match_token() patterns for the add_target option string. */
static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
	{ SRP_OPT_DGID,			"dgid=%s" 		},
	{ SRP_OPT_PKEY,			"pkey=%x" 		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s" 	},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x" 		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s" 	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u" 	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u" 	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u" 	},
	{ SRP_OPT_ERR,			NULL 			}
};
/*
 * Parse the comma-separated option string written to "add_target" into
 * *target.  Returns 0 on success, -ENOMEM on allocation failure, and
 * -EINVAL for a malformed option or a missing mandatory parameter.
 */
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];		/* one two-digit hex byte plus NUL */
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;	/* SRP_OPT_* bits seen so far */
	int token;
	int ret = -EINVAL;
	int i;

	/* strsep() modifies the string, so work on a private copy. */
	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			/* A GID is exactly 32 hex digits (16 bytes). */
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			/* Convert two hex digits at a time into raw bytes. */
			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			/* Keep a copy that survives CM redirects. */
			memcpy(target->orig_dgid, target->path.dgid.raw, 16);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			target->path.service_id = target->service_id;
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token)) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			/* Never exceed the send-queue depth. */
			target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	/* All mandatory options must have been seen. */
	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}
/*
 * sysfs "add_target" store handler: allocate a SCSI host and an SRP
 * target port, parse the option string, allocate the per-request data,
 * connect to the target and register it with the SCSI midlayer.
 * Returns 'count' on success or a negative errno.
 *
 * Bug fix: the request-ring allocation failures below previously jumped
 * to err_free_mem with 'ret' still 0 (the successful srp_parse_options()
 * result), making the sysfs write return 0 — userspace write() would
 * then retry forever.  Set ret = -ENOMEM on those paths.
 */
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct ib_device *ibdev = host->srp_dev->dev;
	dma_addr_t dma_addr;
	int i, ret;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	/* Defaults; srp_parse_options() may override several of these. */
	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->mr->lkey;
	target->rkey		= host->srp_dev->mr->rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	/* Without FMRs or external indirect descriptors every S/G entry
	 * must fit in the command IU itself. */
	if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
				target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	spin_lock_init(&target->lock);
	INIT_LIST_HEAD(&target->free_tx);
	INIT_LIST_HEAD(&target->free_reqs);
	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];

		req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
					GFP_KERNEL);
		req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
					GFP_KERNEL);
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->fmr_list || !req->map_page || !req->indirect_desc) {
			ret = -ENOMEM;	/* was missing: 'ret' held 0 here */
			goto err_free_mem;
		}

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr)) {
			ret = -ENOMEM;	/* was missing: 'ret' held 0 here */
			goto err_free_mem;
		}

		req->indirect_dma_addr = dma_addr;
		req->index = i;
		list_add_tail(&req->list, &target->free_reqs);
	}

	ib_query_gid(ibdev, host->port, 0, &target->path.sgid);

	shost_printk(KERN_DEBUG, target->scsi_host, PFX
		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
		     "service_id %016llx dgid %pI6\n",
		     (unsigned long long) be64_to_cpu(target->id_ext),
		     (unsigned long long) be64_to_cpu(target->ioc_guid),
		     be16_to_cpu(target->path.pkey),
		     (unsigned long long) be64_to_cpu(target->service_id),
		     target->path.dgid.raw);

	ret = srp_create_target_ib(target);
	if (ret)
		goto err_free_mem;

	ret = srp_new_cm_id(target);
	if (ret)
		goto err_free_ib;

	target->qp_in_error = 0;
	ret = srp_connect_target(target);
	if (ret) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

	/* Unwind in reverse order of construction. */
err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free_ib:
	srp_free_target_ib(target);

err_free_mem:
	srp_free_req_data(target);

err:
	scsi_host_put(target_host);

	return ret;
}
/* Write-only sysfs attribute used to create new target ports. */
static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
/* sysfs "ibdev": name of the IB device behind this SRP host. */
static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);
	const char *name = host->srp_dev->dev->name;

	return sprintf(buf, "%s\n", name);
}
/* Read-only: name of the underlying IB device. */
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
/* sysfs "port": IB port number this SRP host is bound to. */
static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);
	int port = host->port;

	return sprintf(buf, "%d\n", port);
}
/* Read-only: local IB port number. */
static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
/*
 * Allocate and register one srp_host (a class device plus its sysfs
 * attributes) for the given IB device/port pair.  Returns the new host
 * or NULL on any failure.
 */
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	/*
	 * NOTE(review): if device_register() fails the driver-core rule
	 * is to call put_device() rather than free the memory directly,
	 * since the embedded struct device is already refcounted —
	 * confirm whether the kfree on the free_host path is safe here.
	 */
	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}
/*
 * IB client "add" callback: set up per-HCA state (PD, DMA MR, optional
 * FMR pool) and create one srp_host per physical port.  Failures are
 * silent apart from log messages since the callback returns void.
 */
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int max_pages_per_fmr, fmr_page_shift, s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	fmr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size	= 1 << fmr_page_shift;
	srp_dev->fmr_page_mask	= ~((u64) srp_dev->fmr_page_size - 1);
	srp_dev->fmr_max_size	= srp_dev->fmr_page_size * SRP_FMR_SIZE;

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	/* Try to create the FMR pool, halving the FMR size on failure
	 * until SRP_FMR_MIN_SIZE is reached. */
	for (max_pages_per_fmr = SRP_FMR_SIZE;
			max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
			max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
		memset(&fmr_param, 0, sizeof fmr_param);
		fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
		fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
		fmr_param.cache		    = 1;
		fmr_param.max_pages_per_fmr = max_pages_per_fmr;
		fmr_param.page_shift	    = fmr_page_shift;
		fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
					       IB_ACCESS_REMOTE_WRITE |
					       IB_ACCESS_REMOTE_READ);

		srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
		if (!IS_ERR(srp_dev->fmr_pool))
			break;
	}

	/* The driver can run without FMRs (see srp_create_target()). */
	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

	/* Switches have a single port numbered 0; HCAs start at 1. */
	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}
/*
 * IB client "remove" callback: tear down every srp_host and target port
 * that was created for the device, then release the per-HCA resources.
 */
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	LIST_HEAD(target_list);
	struct srp_target_port *target, *tmp_target;

	srp_dev = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Mark all target ports as removed, so we stop queueing
		 * commands and don't try to reconnect.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list) {
			spin_lock_irq(&target->lock);
			target->state = SRP_TARGET_REMOVED;
			spin_unlock_irq(&target->lock);
		}
		spin_unlock(&host->target_lock);

		/*
		 * Wait for any reconnection tasks that may have
		 * started before we marked our target ports as
		 * removed, and any target port removal tasks.
		 */
		flush_workqueue(ib_wq);

		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			srp_del_scsi_host_attr(target->scsi_host);
			srp_remove_host(target->scsi_host);
			scsi_remove_host(target->scsi_host);
			srp_disconnect_target(target);
			ib_destroy_cm_id(target->cm_id);
			srp_free_target_ib(target);
			srp_free_req_data(target);
			scsi_host_put(target->scsi_host);
		}

		kfree(host);
	}

	/* Release per-HCA resources in reverse order of creation. */
	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}
/* SRP transport class callbacks; this initiator implements none. */
static struct srp_function_template ib_srp_transport_functions = {
};
/*
 * Module init: validate/normalize the scatter-gather module parameters,
 * register the SRP transport class, the sysfs class, the SA client and
 * finally the IB client whose add callback creates the srp_hosts.
 */
static int __init srp_init_module(void)
{
	int ret;

	/* Completion handling stores pointers in ib_wc.wr_id. */
	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	/* indirect_sg_entries must be at least cmd_sg_entries. */
	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		srp_release_transport(ib_srp_transport_template);
		return ret;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		srp_release_transport(ib_srp_transport_template);
		ib_sa_unregister_client(&srp_sa_client);
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}
/* Module exit: unregister everything in reverse order of init. */
static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);
| gpl-2.0 |
UDOOboard/Kernel_Unico | arch/arm/mach-s3c2410/usb-simtec.c | 4188 | 2873 | /* linux/arch/arm/mach-s3c2410/usb-simtec.c
*
* Copyright 2004-2005 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* http://www.simtec.co.uk/products/EB2410ITX/
*
* Simtec BAST and Thorcom VR1000 USB port support functions
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define DEBUG
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/gpio.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/io.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/bast-map.h>
#include <mach/bast-irq.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <plat/usb-control.h>
#include <plat/devs.h>
#include "usb-simtec.h"
/* control power and monitor over-current events on various Simtec
 * designed boards.
 */

/* Requested power state for each of the two USB ports; the shared
 * supply GPIO is only switched when both entries agree (see
 * usb_simtec_powercontrol()). */
static unsigned int power_state[2];
/*
 * Per-port power-control callback for the OHCI driver.  Both ports
 * share a single supply on GPB4; the line is driven low only when
 * both ports have requested power.
 */
static void
usb_simtec_powercontrol(int port, int to)
{
	pr_debug("usb_simtec_powercontrol(%d,%d)\n", port, to);

	power_state[port] = to;

	gpio_set_value(S3C2410_GPB(4),
		       (power_state[0] && power_state[1]) ? 0 : 1);
}
/*
 * Over-current interrupt handler: GPG10 low means over-current is
 * asserted; report the condition (or its clearing) to the OHCI core.
 */
static irqreturn_t
usb_simtec_ocirq(int irq, void *pw)
{
	struct s3c2410_hcd_info *info = pw;
	int oc_asserted = gpio_get_value(S3C2410_GPG(10)) == 0;

	if (oc_asserted) {
		pr_debug("usb_simtec: over-current irq (oc detected)\n");
		s3c2410_usb_report_oc(info, 3);
	} else {
		pr_debug("usb_simtec: over-current irq (oc cleared)\n");
		s3c2410_usb_report_oc(info, 0);
	}

	return IRQ_HANDLED;
}
/*
 * Enable or disable over-current reporting by requesting/releasing the
 * over-current IRQ (triggered on both edges of the GPG10 line).
 */
static void usb_simtec_enableoc(struct s3c2410_hcd_info *info, int on)
{
	int ret;

	if (!on) {
		free_irq(IRQ_USBOC, info);
		return;
	}

	ret = request_irq(IRQ_USBOC, usb_simtec_ocirq,
			  IRQF_DISABLED | IRQF_TRIGGER_RISING |
			  IRQF_TRIGGER_FALLING,
			  "USB Over-current", info);
	if (ret != 0)
		printk(KERN_ERR "failed to request usb oc irq\n");
}
/* OHCI platform data: both ports in use, with the board-specific
 * power-control and over-current callbacks above. */
static struct s3c2410_hcd_info usb_simtec_info __initdata = {
	.port[0]	= {
		.flags	= S3C_HCDFLG_USED
	},
	.port[1]	= {
		.flags	= S3C_HCDFLG_USED
	},

	.power_control	= usb_simtec_powercontrol,
	.enable_oc	= usb_simtec_enableoc,
};
/*
 * Claim the USB power-control (GPB4) and over-current (GPG10) GPIOs,
 * turn the port power on, and hand the platform data to the OHCI
 * driver.  Returns 0 on success or a negative errno.
 */
int usb_simtec_init(void)
{
	int ret;

	printk("USB Power Control, Copyright 2004 Simtec Electronics\n");

	ret = gpio_request(S3C2410_GPB(4), "USB power control");
	if (ret < 0) {
		pr_err("%s: failed to get GPB4\n", __func__);
		return ret;
	}

	ret = gpio_request(S3C2410_GPG(10), "USB overcurrent");
	if (ret < 0) {
		pr_err("%s: failed to get GPG10\n", __func__);
		/* Release the already-claimed power GPIO on failure. */
		gpio_free(S3C2410_GPB(4));
		return ret;
	}

	/* turn power on */
	gpio_direction_output(S3C2410_GPB(4), 1);
	gpio_direction_input(S3C2410_GPG(10));

	s3c_ohci_set_platdata(&usb_simtec_info);
	return 0;
}
| gpl-2.0 |
MatthewBooth/JFLTE-GPE-Kernel | arch/arm/mach-kirkwood/board-dt.c | 4700 | 1979 | /*
* Copyright 2012 (C), Jason Cooper <jason@lakedaemon.net>
*
* arch/arm/mach-kirkwood/board-dt.c
*
* Flattened Device Tree board initialization
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/kexec.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <mach/bridge-regs.h>
#include "common.h"
/* Buses whose children are populated as platform devices from the DT. */
static struct of_device_id kirkwood_dt_match_table[] __initdata = {
	{ .compatible = "simple-bus", },
	{ }
};
/*
 * Machine init for DT-based Kirkwood boards: set up the always-present
 * on-chip devices, apply board-specific fixups, then populate the rest
 * of the devices from the device tree.
 */
static void __init kirkwood_dt_init(void)
{
	pr_info("Kirkwood: %s, TCLK=%d.\n", kirkwood_id(), kirkwood_tclk);

	/*
	 * Disable propagation of mbus errors to the CPU local bus,
	 * as this causes mbus errors (which can occur for example
	 * for PCI aborts) to throw CPU aborts, which we're not set
	 * up to deal with.
	 */
	writel(readl(CPU_CONFIG) & ~CPU_CONFIG_ERROR_PROP, CPU_CONFIG);

	kirkwood_setup_cpu_mbus();

#ifdef CONFIG_CACHE_FEROCEON_L2
	kirkwood_l2_init();
#endif

	/* internal devices that every board has */
	kirkwood_wdt_init();
	kirkwood_xor0_init();
	kirkwood_xor1_init();
	kirkwood_crypto_init();

#ifdef CONFIG_KEXEC
	kexec_reinit = kirkwood_enable_pcie;
#endif

	if (of_machine_is_compatible("globalscale,dreamplug"))
		dreamplug_init();

	of_platform_populate(NULL, kirkwood_dt_match_table, NULL, NULL);
}
/* Boards this DT machine description claims. */
static const char *kirkwood_dt_board_compat[] = {
	"globalscale,dreamplug",
	NULL
};

DT_MACHINE_START(KIRKWOOD_DT, "Marvell Kirkwood (Flattened Device Tree)")
	/* Maintainer: Jason Cooper <jason@lakedaemon.net> */
	.map_io		= kirkwood_map_io,
	.init_early	= kirkwood_init_early,
	.init_irq	= kirkwood_init_irq,
	.timer		= &kirkwood_timer,
	.init_machine	= kirkwood_dt_init,
	.restart	= kirkwood_restart,
	.dt_compat	= kirkwood_dt_board_compat,
MACHINE_END
| gpl-2.0 |
RealVNC/Android-kernel-mako-NCM | drivers/media/video/gspca/spca508.c | 4956 | 41826 | /*
* SPCA508 chip based cameras subdriver
*
* Copyright (C) 2009 Jean-Francois Moine <http://moinejf.free.fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MODULE_NAME "spca508"
#include "gspca.h"
MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
MODULE_DESCRIPTION("GSPCA/SPCA508 USB Camera Driver");
MODULE_LICENSE("GPL");
/* specific webcam descriptor */
/* Per-camera state; must start with the common gspca device struct. */
struct sd {
	struct gspca_dev gspca_dev;		/* !! must be the first item */

	u8 brightness;		/* current brightness control value */

	u8 subtype;		/* board variant, one of the values below */
#define CreativeVista 0
#define HamaUSBSightcam 1
#define HamaUSBSightcam2 2
#define IntelEasyPCCamera 3
#define MicroInnovationIC200 4
#define ViewQuestVQ110 5
};
/* V4L2 controls supported by the driver */
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
/* V4L2 control table: this driver exposes a single brightness control,
 * wired to the sd_setbrightness/sd_getbrightness callbacks above. */
static const struct ctrl sd_ctrls[] = {
{
{
.id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Brightness",
.minimum = 0,
.maximum = 255,
.step = 1,
#define BRIGHTNESS_DEF 128
.default_value = BRIGHTNESS_DEF,
},
.set = sd_setbrightness,
.get = sd_getbrightness,
},
};
/* Supported frame formats (SIF family, all V4L2_PIX_FMT_SPCA508 YUV).
 * .sizeimage is w*h*3/2 (YUV 4:2:0); .priv carries the hardware video
 * mode index written to reg 0x8500 (3 = smallest, 0 = largest). */
static const struct v4l2_pix_format sif_mode[] = {
{160, 120, V4L2_PIX_FMT_SPCA508, V4L2_FIELD_NONE,
.bytesperline = 160,
.sizeimage = 160 * 120 * 3 / 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 3},
{176, 144, V4L2_PIX_FMT_SPCA508, V4L2_FIELD_NONE,
.bytesperline = 176,
.sizeimage = 176 * 144 * 3 / 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 2},
{320, 240, V4L2_PIX_FMT_SPCA508, V4L2_FIELD_NONE,
.bytesperline = 320,
.sizeimage = 320 * 240 * 3 / 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1},
{352, 288, V4L2_PIX_FMT_SPCA508, V4L2_FIELD_NONE,
.bytesperline = 352,
.sizeimage = 352 * 288 * 3 / 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0},
};
/* Frame packet header offsets for the spca508 */
#define SPCA508_OFFSET_DATA 37
/*
* Initialization data: this is the first set-up data written to the
* device (before the open data).
*/
static const u16 spca508_init_data[][2] = {
{0x0000, 0x870b},
{0x0020, 0x8112}, /* Video drop enable, ISO streaming disable */
{0x0003, 0x8111}, /* Reset compression & memory */
{0x0000, 0x8110}, /* Disable all outputs */
/* READ {0x0000, 0x8114} -> 0000: 00 */
{0x0000, 0x8114}, /* SW GPIO data */
{0x0008, 0x8110}, /* Enable charge pump output */
{0x0002, 0x8116}, /* 200 kHz pump clock */
/* UNKNOWN DIRECTION (URB_FUNCTION_SELECT_INTERFACE:) */
{0x0003, 0x8111}, /* Reset compression & memory */
{0x0000, 0x8111}, /* Normal mode (not reset) */
{0x0098, 0x8110},
/* Enable charge pump output, sync.serial,external 2x clock */
{0x000d, 0x8114}, /* SW GPIO data */
{0x0002, 0x8116}, /* 200 kHz pump clock */
{0x0020, 0x8112}, /* Video drop enable, ISO streaming disable */
/* --------------------------------------- */
{0x000f, 0x8402}, /* memory bank */
{0x0000, 0x8403}, /* ... address */
/* --------------------------------------- */
/* 0x88__ is Synchronous Serial Interface. */
/* TBD: This table could be expressed more compactly */
/* using spca508_write_i2c_vector(). */
/* TBD: Should see if the values in spca50x_i2c_data */
/* would work with the VQ110 instead of the values */
/* below. */
{0x00c0, 0x8804}, /* SSI slave addr */
{0x0008, 0x8802}, /* 375 Khz SSI clock */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802}, /* 375 Khz SSI clock */
{0x0012, 0x8801}, /* SSI reg addr */
{0x0080, 0x8800}, /* SSI data to write */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802}, /* 375 Khz SSI clock */
{0x0012, 0x8801}, /* SSI reg addr */
{0x0000, 0x8800}, /* SSI data to write */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802}, /* 375 Khz SSI clock */
{0x0011, 0x8801}, /* SSI reg addr */
{0x0040, 0x8800}, /* SSI data to write */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0013, 0x8801},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0014, 0x8801},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0015, 0x8801},
{0x0001, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0016, 0x8801},
{0x0003, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0017, 0x8801},
{0x0036, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0018, 0x8801},
{0x00ec, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x001a, 0x8801},
{0x0094, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x001b, 0x8801},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0027, 0x8801},
{0x00a2, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0028, 0x8801},
{0x0040, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x002a, 0x8801},
{0x0084, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x002b, 0x8801},
{0x00a8, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x002c, 0x8801},
{0x00fe, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x002d, 0x8801},
{0x0003, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0038, 0x8801},
{0x0083, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0033, 0x8801},
{0x0081, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0034, 0x8801},
{0x004a, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0039, 0x8801},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0010, 0x8801},
{0x00a8, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0006, 0x8801},
{0x0058, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0000, 0x8801},
{0x0004, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0040, 0x8801},
{0x0080, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0041, 0x8801},
{0x000c, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0042, 0x8801},
{0x000c, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0043, 0x8801},
{0x0028, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0044, 0x8801},
{0x0080, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0045, 0x8801},
{0x0020, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0046, 0x8801},
{0x0020, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0047, 0x8801},
{0x0080, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0048, 0x8801},
{0x004c, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0049, 0x8801},
{0x0084, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x004a, 0x8801},
{0x0084, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x004b, 0x8801},
{0x0084, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* --------------------------------------- */
{0x0012, 0x8700}, /* Clock speed 48Mhz/(2+2)/2= 6 Mhz */
{0x0000, 0x8701}, /* CKx1 clock delay adj */
{0x0000, 0x8701}, /* CKx1 clock delay adj */
{0x0001, 0x870c}, /* CKOx2 output */
/* --------------------------------------- */
{0x0080, 0x8600}, /* Line memory read counter (L) */
{0x0001, 0x8606}, /* reserved */
{0x0064, 0x8607}, /* Line memory read counter (H) 0x6480=25,728 */
{0x002a, 0x8601}, /* CDSP sharp interpolation mode,
* line sel for color sep, edge enhance enab */
{0x0000, 0x8602}, /* optical black level for user settng = 0 */
{0x0080, 0x8600}, /* Line memory read counter (L) */
{0x000a, 0x8603}, /* optical black level calc mode:
* auto; optical black offset = 10 */
{0x00df, 0x865b}, /* Horiz offset for valid pixels (L)=0xdf */
{0x0012, 0x865c}, /* Vert offset for valid lines (L)=0x12 */
/* The following two lines seem to be the "wrong" resolution. */
/* But perhaps these indicate the actual size of the sensor */
/* rather than the size of the current video mode. */
{0x0058, 0x865d}, /* Horiz valid pixels (*4) (L) = 352 */
{0x0048, 0x865e}, /* Vert valid lines (*4) (L) = 288 */
{0x0015, 0x8608}, /* A11 Coef ... */
{0x0030, 0x8609},
{0x00fb, 0x860a},
{0x003e, 0x860b},
{0x00ce, 0x860c},
{0x00f4, 0x860d},
{0x00eb, 0x860e},
{0x00dc, 0x860f},
{0x0039, 0x8610},
{0x0001, 0x8611}, /* R offset for white balance ... */
{0x0000, 0x8612},
{0x0001, 0x8613},
{0x0000, 0x8614},
{0x005b, 0x8651}, /* R gain for white balance ... */
{0x0040, 0x8652},
{0x0060, 0x8653},
{0x0040, 0x8654},
{0x0000, 0x8655},
{0x0001, 0x863f}, /* Fixed gamma correction enable, USB control,
* lum filter disable, lum noise clip disable */
{0x00a1, 0x8656}, /* Window1 size 256x256, Windows2 size 64x64,
* gamma look-up disable,
* new edge enhancement enable */
{0x0018, 0x8657}, /* Edge gain high thresh */
{0x0020, 0x8658}, /* Edge gain low thresh */
{0x000a, 0x8659}, /* Edge bandwidth high threshold */
{0x0005, 0x865a}, /* Edge bandwidth low threshold */
/* -------------------------------- */
{0x0030, 0x8112}, /* Video drop enable, ISO streaming enable */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0xa908, 0x8802},
{0x0034, 0x8801}, /* SSI reg addr */
{0x00ca, 0x8800},
/* SSI data to write */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x1f08, 0x8802},
{0x0006, 0x8801},
{0x0080, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* ----- Read back coefs we wrote earlier. */
/* READ { 0x0000, 0x8608 } -> 0000: 15 */
/* READ { 0x0000, 0x8609 } -> 0000: 30 */
/* READ { 0x0000, 0x860a } -> 0000: fb */
/* READ { 0x0000, 0x860b } -> 0000: 3e */
/* READ { 0x0000, 0x860c } -> 0000: ce */
/* READ { 0x0000, 0x860d } -> 0000: f4 */
/* READ { 0x0000, 0x860e } -> 0000: eb */
/* READ { 0x0000, 0x860f } -> 0000: dc */
/* READ { 0x0000, 0x8610 } -> 0000: 39 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0xb008, 0x8802},
{0x0006, 0x8801},
{0x007d, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* This chunk is seemingly redundant with */
/* earlier commands (A11 Coef...), but if I disable it, */
/* the image appears too dark. Maybe there was some kind of */
/* reset since the earlier commands, so this is necessary again. */
{0x0015, 0x8608},
{0x0030, 0x8609},
{0xfffb, 0x860a},
{0x003e, 0x860b},
{0xffce, 0x860c},
{0xfff4, 0x860d},
{0xffeb, 0x860e},
{0xffdc, 0x860f},
{0x0039, 0x8610},
{0x0018, 0x8657},
{0x0000, 0x8508}, /* Disable compression. */
/* Previous line was:
{0x0021, 0x8508}, * Enable compression. */
{0x0032, 0x850b}, /* compression stuff */
{0x0003, 0x8509}, /* compression stuff */
{0x0011, 0x850a}, /* compression stuff */
{0x0021, 0x850d}, /* compression stuff */
{0x0010, 0x850c}, /* compression stuff */
{0x0003, 0x8500}, /* *** Video mode: 160x120 */
{0x0001, 0x8501}, /* Hardware-dominated snap control */
{0x0061, 0x8656}, /* Window1 size 128x128, Windows2 size 128x128,
* gamma look-up disable,
* new edge enhancement enable */
{0x0018, 0x8617}, /* Window1 start X (*2) */
{0x0008, 0x8618}, /* Window1 start Y (*2) */
{0x0061, 0x8656}, /* Window1 size 128x128, Windows2 size 128x128,
* gamma look-up disable,
* new edge enhancement enable */
{0x0058, 0x8619}, /* Window2 start X (*2) */
{0x0008, 0x861a}, /* Window2 start Y (*2) */
{0x00ff, 0x8615}, /* High lum thresh for white balance */
{0x0000, 0x8616}, /* Low lum thresh for white balance */
{0x0012, 0x8700}, /* Clock speed 48Mhz/(2+2)/2= 6 Mhz */
{0x0012, 0x8700}, /* Clock speed 48Mhz/(2+2)/2= 6 Mhz */
/* READ { 0x0000, 0x8656 } -> 0000: 61 */
{0x0028, 0x8802}, /* 375 Khz SSI clock, SSI r/w sync with VSYNC */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 28 */
{0x1f28, 0x8802}, /* 375 Khz SSI clock, SSI r/w sync with VSYNC */
{0x0010, 0x8801}, /* SSI reg addr */
{0x003e, 0x8800}, /* SSI data to write */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
{0x0028, 0x8802},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 28 */
{0x1f28, 0x8802},
{0x0000, 0x8801},
{0x001f, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
{0x0001, 0x8602}, /* optical black level for user settning = 1 */
/* Original: */
{0x0023, 0x8700}, /* Clock speed 48Mhz/(3+2)/4= 2.4 Mhz */
{0x000f, 0x8602}, /* optical black level for user settning = 15 */
{0x0028, 0x8802},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 28 */
{0x1f28, 0x8802},
{0x0010, 0x8801},
{0x007b, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
{0x002f, 0x8651}, /* R gain for white balance ... */
{0x0080, 0x8653},
/* READ { 0x0000, 0x8655 } -> 0000: 00 */
{0x0000, 0x8655},
{0x0030, 0x8112}, /* Video drop enable, ISO streaming enable */
{0x0020, 0x8112}, /* Video drop enable, ISO streaming disable */
/* UNKNOWN DIRECTION (URB_FUNCTION_SELECT_INTERFACE: (ALT=0) ) */
{}
};
/*
* Initialization data for Intel EasyPC Camera CS110
*/
static const u16 spca508cs110_init_data[][2] = {
{0x0000, 0x870b}, /* Reset CTL3 */
{0x0003, 0x8111}, /* Soft Reset compression, memory, TG & CDSP */
{0x0000, 0x8111}, /* Normal operation on reset */
{0x0090, 0x8110},
/* External Clock 2x & Synchronous Serial Interface Output */
{0x0020, 0x8112}, /* Video Drop packet enable */
{0x0000, 0x8114}, /* Software GPIO output data */
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0003, 0x8114},
/* Initial sequence Synchronous Serial Interface */
{0x000f, 0x8402}, /* Memory bank Address */
{0x0000, 0x8403}, /* Memory bank Address */
{0x00ba, 0x8804}, /* SSI Slave address */
{0x0010, 0x8802}, /* 93.75kHz SSI Clock Two DataByte */
{0x0010, 0x8802}, /* 93.75kHz SSI Clock two DataByte */
{0x0001, 0x8801},
{0x000a, 0x8805}, /* a - NWG: Dunno what this is about */
{0x0000, 0x8800},
{0x0010, 0x8802},
{0x0002, 0x8801},
{0x0000, 0x8805},
{0x0000, 0x8800},
{0x0010, 0x8802},
{0x0003, 0x8801},
{0x0027, 0x8805},
{0x0001, 0x8800},
{0x0010, 0x8802},
{0x0004, 0x8801},
{0x0065, 0x8805},
{0x0001, 0x8800},
{0x0010, 0x8802},
{0x0005, 0x8801},
{0x0003, 0x8805},
{0x0000, 0x8800},
{0x0010, 0x8802},
{0x0006, 0x8801},
{0x001c, 0x8805},
{0x0000, 0x8800},
{0x0010, 0x8802},
{0x0007, 0x8801},
{0x002a, 0x8805},
{0x0000, 0x8800},
{0x0010, 0x8802},
{0x0002, 0x8704}, /* External input CKIx1 */
{0x0001, 0x8606}, /* 1 Line memory Read Counter (H) Result: (d)410 */
{0x009a, 0x8600}, /* Line memory Read Counter (L) */
{0x0001, 0x865b}, /* 1 Horizontal Offset for Valid Pixel(L) */
{0x0003, 0x865c}, /* 3 Vertical Offset for Valid Lines(L) */
{0x0058, 0x865d}, /* 58 Horizontal Valid Pixel Window(L) */
{0x0006, 0x8660}, /* Nibble data + input order */
{0x000a, 0x8602}, /* Optical black level set to 0x0a */
{0x0000, 0x8603}, /* Optical black level Offset */
/* {0x0000, 0x8611}, * 0 R Offset for white Balance */
/* {0x0000, 0x8612}, * 1 Gr Offset for white Balance */
/* {0x0000, 0x8613}, * 1f B Offset for white Balance */
/* {0x0000, 0x8614}, * f0 Gb Offset for white Balance */
{0x0040, 0x8651}, /* 2b BLUE gain for white balance good at all 60 */
{0x0030, 0x8652}, /* 41 Gr Gain for white Balance (L) */
{0x0035, 0x8653}, /* 26 RED gain for white balance */
{0x0035, 0x8654}, /* 40Gb Gain for white Balance (L) */
{0x0041, 0x863f},
/* Fixed Gamma correction enabled (makes colours look better) */
{0x0000, 0x8655},
/* High bits for white balance*****brightness control*** */
{}
};
static const u16 spca508_sightcam_init_data[][2] = {
/* This line seems to setup the frame/canvas */
{0x000f, 0x8402},
/* These 6 lines are needed to startup the webcam */
{0x0090, 0x8110},
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0003, 0x8114},
{0x0080, 0x8804},
/* This part seems to make the pictures darker? (autobrightness?) */
{0x0001, 0x8801},
{0x0004, 0x8800},
{0x0003, 0x8801},
{0x00e0, 0x8800},
{0x0004, 0x8801},
{0x00b4, 0x8800},
{0x0005, 0x8801},
{0x0000, 0x8800},
{0x0006, 0x8801},
{0x00e0, 0x8800},
{0x0007, 0x8801},
{0x000c, 0x8800},
/* This section is just needed, it probably
* does something like the previous section,
* but the cam won't start if it's not included.
*/
{0x0014, 0x8801},
{0x0008, 0x8800},
{0x0015, 0x8801},
{0x0067, 0x8800},
{0x0016, 0x8801},
{0x0000, 0x8800},
{0x0017, 0x8801},
{0x0020, 0x8800},
{0x0018, 0x8801},
{0x0044, 0x8800},
/* Makes the picture darker - and the
* cam won't start if not included
*/
{0x001e, 0x8801},
{0x00ea, 0x8800},
{0x001f, 0x8801},
{0x0001, 0x8800},
{0x0003, 0x8801},
{0x00e0, 0x8800},
/* seems to place the colors ontop of each other #1 */
{0x0006, 0x8704},
{0x0001, 0x870c},
{0x0016, 0x8600},
{0x0002, 0x8606},
/* if not included the pictures becomes _very_ dark */
{0x0064, 0x8607},
{0x003a, 0x8601},
{0x0000, 0x8602},
/* seems to place the colors ontop of each other #2 */
{0x0016, 0x8600},
{0x0018, 0x8617},
{0x0008, 0x8618},
{0x00a1, 0x8656},
/* webcam won't start if not included */
{0x0007, 0x865b},
{0x0001, 0x865c},
{0x0058, 0x865d},
{0x0048, 0x865e},
/* adjusts the colors */
{0x0049, 0x8651},
{0x0040, 0x8652},
{0x004c, 0x8653},
{0x0040, 0x8654},
{}
};
static const u16 spca508_sightcam2_init_data[][2] = {
{0x0020, 0x8112},
{0x000f, 0x8402},
{0x0000, 0x8403},
{0x0008, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x0009, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x000a, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x000b, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x000c, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x000d, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x000e, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x0007, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x000f, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x0018, 0x8660},
{0x0010, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x0011, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x0000, 0x86b0},
{0x0034, 0x86b1},
{0x0000, 0x86b2},
{0x0049, 0x86b3},
{0x0000, 0x86b4},
{0x0000, 0x86b4},
{0x0012, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x0013, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x0001, 0x86b0},
{0x00aa, 0x86b1},
{0x0000, 0x86b2},
{0x00e4, 0x86b3},
{0x0000, 0x86b4},
{0x0000, 0x86b4},
{0x0018, 0x8660},
{0x0090, 0x8110},
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0003, 0x8114},
{0x0080, 0x8804},
{0x0003, 0x8801},
{0x0012, 0x8800},
{0x0004, 0x8801},
{0x0005, 0x8800},
{0x0005, 0x8801},
{0x0000, 0x8800},
{0x0006, 0x8801},
{0x0000, 0x8800},
{0x0007, 0x8801},
{0x0000, 0x8800},
{0x0008, 0x8801},
{0x0005, 0x8800},
{0x000a, 0x8700},
{0x000e, 0x8801},
{0x0004, 0x8800},
{0x0005, 0x8801},
{0x0047, 0x8800},
{0x0006, 0x8801},
{0x0000, 0x8800},
{0x0007, 0x8801},
{0x00c0, 0x8800},
{0x0008, 0x8801},
{0x0003, 0x8800},
{0x0013, 0x8801},
{0x0001, 0x8800},
{0x0009, 0x8801},
{0x0000, 0x8800},
{0x000a, 0x8801},
{0x0000, 0x8800},
{0x000b, 0x8801},
{0x0000, 0x8800},
{0x000c, 0x8801},
{0x0000, 0x8800},
{0x000e, 0x8801},
{0x0004, 0x8800},
{0x000f, 0x8801},
{0x0000, 0x8800},
{0x0010, 0x8801},
{0x0006, 0x8800},
{0x0011, 0x8801},
{0x0006, 0x8800},
{0x0012, 0x8801},
{0x0000, 0x8800},
{0x0013, 0x8801},
{0x0001, 0x8800},
{0x000a, 0x8700},
{0x0000, 0x8702},
{0x0000, 0x8703},
{0x00c2, 0x8704},
{0x0001, 0x870c},
{0x0044, 0x8600},
{0x0002, 0x8606},
{0x0064, 0x8607},
{0x003a, 0x8601},
{0x0008, 0x8602},
{0x0044, 0x8600},
{0x0018, 0x8617},
{0x0008, 0x8618},
{0x00a1, 0x8656},
{0x0004, 0x865b},
{0x0002, 0x865c},
{0x0058, 0x865d},
{0x0048, 0x865e},
{0x0012, 0x8608},
{0x002c, 0x8609},
{0x0002, 0x860a},
{0x002c, 0x860b},
{0x00db, 0x860c},
{0x00f9, 0x860d},
{0x00f1, 0x860e},
{0x00e3, 0x860f},
{0x002c, 0x8610},
{0x006c, 0x8651},
{0x0041, 0x8652},
{0x0059, 0x8653},
{0x0040, 0x8654},
{0x00fa, 0x8611},
{0x00ff, 0x8612},
{0x00f8, 0x8613},
{0x0000, 0x8614},
{0x0001, 0x863f},
{0x0000, 0x8640},
{0x0026, 0x8641},
{0x0045, 0x8642},
{0x0060, 0x8643},
{0x0075, 0x8644},
{0x0088, 0x8645},
{0x009b, 0x8646},
{0x00b0, 0x8647},
{0x00c5, 0x8648},
{0x00d2, 0x8649},
{0x00dc, 0x864a},
{0x00e5, 0x864b},
{0x00eb, 0x864c},
{0x00f0, 0x864d},
{0x00f6, 0x864e},
{0x00fa, 0x864f},
{0x00ff, 0x8650},
{0x0060, 0x8657},
{0x0010, 0x8658},
{0x0018, 0x8659},
{0x0005, 0x865a},
{0x0018, 0x8660},
{0x0003, 0x8509},
{0x0011, 0x850a},
{0x0032, 0x850b},
{0x0010, 0x850c},
{0x0021, 0x850d},
{0x0001, 0x8500},
{0x0000, 0x8508},
{0x0012, 0x8608},
{0x002c, 0x8609},
{0x0002, 0x860a},
{0x0039, 0x860b},
{0x00d0, 0x860c},
{0x00f7, 0x860d},
{0x00ed, 0x860e},
{0x00db, 0x860f},
{0x0039, 0x8610},
{0x0012, 0x8657},
{0x000c, 0x8619},
{0x0004, 0x861a},
{0x00a1, 0x8656},
{0x00c8, 0x8615},
{0x0032, 0x8616},
{0x0030, 0x8112},
{0x0020, 0x8112},
{0x0020, 0x8112},
{0x000f, 0x8402},
{0x0000, 0x8403},
{0x0090, 0x8110},
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0003, 0x8114},
{0x0080, 0x8804},
{0x0003, 0x8801},
{0x0012, 0x8800},
{0x0004, 0x8801},
{0x0005, 0x8800},
{0x0005, 0x8801},
{0x0047, 0x8800},
{0x0006, 0x8801},
{0x0000, 0x8800},
{0x0007, 0x8801},
{0x00c0, 0x8800},
{0x0008, 0x8801},
{0x0003, 0x8800},
{0x000a, 0x8700},
{0x000e, 0x8801},
{0x0004, 0x8800},
{0x0005, 0x8801},
{0x0047, 0x8800},
{0x0006, 0x8801},
{0x0000, 0x8800},
{0x0007, 0x8801},
{0x00c0, 0x8800},
{0x0008, 0x8801},
{0x0003, 0x8800},
{0x0013, 0x8801},
{0x0001, 0x8800},
{0x0009, 0x8801},
{0x0000, 0x8800},
{0x000a, 0x8801},
{0x0000, 0x8800},
{0x000b, 0x8801},
{0x0000, 0x8800},
{0x000c, 0x8801},
{0x0000, 0x8800},
{0x000e, 0x8801},
{0x0004, 0x8800},
{0x000f, 0x8801},
{0x0000, 0x8800},
{0x0010, 0x8801},
{0x0006, 0x8800},
{0x0011, 0x8801},
{0x0006, 0x8800},
{0x0012, 0x8801},
{0x0000, 0x8800},
{0x0013, 0x8801},
{0x0001, 0x8800},
{0x000a, 0x8700},
{0x0000, 0x8702},
{0x0000, 0x8703},
{0x00c2, 0x8704},
{0x0001, 0x870c},
{0x0044, 0x8600},
{0x0002, 0x8606},
{0x0064, 0x8607},
{0x003a, 0x8601},
{0x0008, 0x8602},
{0x0044, 0x8600},
{0x0018, 0x8617},
{0x0008, 0x8618},
{0x00a1, 0x8656},
{0x0004, 0x865b},
{0x0002, 0x865c},
{0x0058, 0x865d},
{0x0048, 0x865e},
{0x0012, 0x8608},
{0x002c, 0x8609},
{0x0002, 0x860a},
{0x002c, 0x860b},
{0x00db, 0x860c},
{0x00f9, 0x860d},
{0x00f1, 0x860e},
{0x00e3, 0x860f},
{0x002c, 0x8610},
{0x006c, 0x8651},
{0x0041, 0x8652},
{0x0059, 0x8653},
{0x0040, 0x8654},
{0x00fa, 0x8611},
{0x00ff, 0x8612},
{0x00f8, 0x8613},
{0x0000, 0x8614},
{0x0001, 0x863f},
{0x0000, 0x8640},
{0x0026, 0x8641},
{0x0045, 0x8642},
{0x0060, 0x8643},
{0x0075, 0x8644},
{0x0088, 0x8645},
{0x009b, 0x8646},
{0x00b0, 0x8647},
{0x00c5, 0x8648},
{0x00d2, 0x8649},
{0x00dc, 0x864a},
{0x00e5, 0x864b},
{0x00eb, 0x864c},
{0x00f0, 0x864d},
{0x00f6, 0x864e},
{0x00fa, 0x864f},
{0x00ff, 0x8650},
{0x0060, 0x8657},
{0x0010, 0x8658},
{0x0018, 0x8659},
{0x0005, 0x865a},
{0x0018, 0x8660},
{0x0003, 0x8509},
{0x0011, 0x850a},
{0x0032, 0x850b},
{0x0010, 0x850c},
{0x0021, 0x850d},
{0x0001, 0x8500},
{0x0000, 0x8508},
{0x0012, 0x8608},
{0x002c, 0x8609},
{0x0002, 0x860a},
{0x0039, 0x860b},
{0x00d0, 0x860c},
{0x00f7, 0x860d},
{0x00ed, 0x860e},
{0x00db, 0x860f},
{0x0039, 0x8610},
{0x0012, 0x8657},
{0x0064, 0x8619},
/* This line starts it all, it is not needed here */
/* since it has been build into the driver */
/* jfm: don't start now */
/* {0x0030, 0x8112}, */
{}
};
/*
* Initialization data for Creative Webcam Vista
*/
static const u16 spca508_vista_init_data[][2] = {
{0x0008, 0x8200}, /* Clear register */
{0x0000, 0x870b}, /* Reset CTL3 */
{0x0020, 0x8112}, /* Video Drop packet enable */
{0x0003, 0x8111}, /* Soft Reset compression, memory, TG & CDSP */
{0x0000, 0x8110}, /* Disable everything */
{0x0000, 0x8114}, /* Software GPIO output data */
{0x0000, 0x8114},
{0x0003, 0x8111},
{0x0000, 0x8111},
{0x0090, 0x8110}, /* Enable: SSI output, External 2X clock output */
{0x0020, 0x8112},
{0x0000, 0x8114},
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0003, 0x8114},
{0x000f, 0x8402}, /* Memory bank Address */
{0x0000, 0x8403}, /* Memory bank Address */
{0x00ba, 0x8804}, /* SSI Slave address */
{0x0010, 0x8802}, /* 93.75kHz SSI Clock Two DataByte */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802}, /* Will write 2 bytes (DATA1+DATA2) */
{0x0020, 0x8801}, /* Register address for SSI read/write */
{0x0044, 0x8805}, /* DATA2 */
{0x0004, 0x8800}, /* DATA1 -> write triggered */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0009, 0x8801},
{0x0042, 0x8805},
{0x0001, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x003c, 0x8801},
{0x0001, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0001, 0x8801},
{0x000a, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0002, 0x8801},
{0x0000, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0003, 0x8801},
{0x0027, 0x8805},
{0x0001, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0004, 0x8801},
{0x0065, 0x8805},
{0x0001, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0005, 0x8801},
{0x0003, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0006, 0x8801},
{0x001c, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0007, 0x8801},
{0x002a, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x000e, 0x8801},
{0x0000, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0028, 0x8801},
{0x002e, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0039, 0x8801},
{0x0013, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x003b, 0x8801},
{0x000c, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0035, 0x8801},
{0x0028, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0009, 0x8801},
{0x0042, 0x8805},
{0x0001, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
{0x0050, 0x8703},
{0x0002, 0x8704}, /* External input CKIx1 */
{0x0001, 0x870c}, /* Select CKOx2 output */
{0x009a, 0x8600}, /* Line memory Read Counter (L) */
{0x0001, 0x8606}, /* 1 Line memory Read Counter (H) Result: (d)410 */
{0x0023, 0x8601},
{0x0010, 0x8602},
{0x000a, 0x8603},
{0x009a, 0x8600},
{0x0001, 0x865b}, /* 1 Horizontal Offset for Valid Pixel(L) */
{0x0003, 0x865c}, /* Vertical offset for valid lines (L) */
{0x0058, 0x865d}, /* Horizontal valid pixels window (L) */
{0x0048, 0x865e}, /* Vertical valid lines window (L) */
{0x0000, 0x865f},
{0x0006, 0x8660},
/* Enable nibble data input, select nibble input order */
{0x0013, 0x8608}, /* A11 Coeficients for color correction */
{0x0028, 0x8609},
/* Note: these values are confirmed at the end of array */
{0x0005, 0x860a}, /* ... */
{0x0025, 0x860b},
{0x00e1, 0x860c},
{0x00fa, 0x860d},
{0x00f4, 0x860e},
{0x00e8, 0x860f},
{0x0025, 0x8610}, /* A33 Coef. */
{0x00fc, 0x8611}, /* White balance offset: R */
{0x0001, 0x8612}, /* White balance offset: Gr */
{0x00fe, 0x8613}, /* White balance offset: B */
{0x0000, 0x8614}, /* White balance offset: Gb */
{0x0064, 0x8651}, /* R gain for white balance (L) */
{0x0040, 0x8652}, /* Gr gain for white balance (L) */
{0x0066, 0x8653}, /* B gain for white balance (L) */
{0x0040, 0x8654}, /* Gb gain for white balance (L) */
{0x0001, 0x863f}, /* Enable fixed gamma correction */
{0x00a1, 0x8656}, /* Size - Window1: 256x256, Window2: 128x128,
* UV division: UV no change,
* Enable New edge enhancement */
{0x0018, 0x8657}, /* Edge gain high threshold */
{0x0020, 0x8658}, /* Edge gain low threshold */
{0x000a, 0x8659}, /* Edge bandwidth high threshold */
{0x0005, 0x865a}, /* Edge bandwidth low threshold */
{0x0064, 0x8607}, /* UV filter enable */
{0x0016, 0x8660},
{0x0000, 0x86b0}, /* Bad pixels compensation address */
{0x00dc, 0x86b1}, /* X coord for bad pixels compensation (L) */
{0x0000, 0x86b2},
{0x0009, 0x86b3}, /* Y coord for bad pixels compensation (L) */
{0x0000, 0x86b4},
{0x0001, 0x86b0},
{0x00f5, 0x86b1},
{0x0000, 0x86b2},
{0x00c6, 0x86b3},
{0x0000, 0x86b4},
{0x0002, 0x86b0},
{0x001c, 0x86b1},
{0x0001, 0x86b2},
{0x00d7, 0x86b3},
{0x0000, 0x86b4},
{0x0003, 0x86b0},
{0x001c, 0x86b1},
{0x0001, 0x86b2},
{0x00d8, 0x86b3},
{0x0000, 0x86b4},
{0x0004, 0x86b0},
{0x001d, 0x86b1},
{0x0001, 0x86b2},
{0x00d8, 0x86b3},
{0x0000, 0x86b4},
{0x001e, 0x8660},
/* READ { 0x0000, 0x8608 } -> 0000: 13 */
/* READ { 0x0000, 0x8609 } -> 0000: 28 */
/* READ { 0x0000, 0x8610 } -> 0000: 05 */
/* READ { 0x0000, 0x8611 } -> 0000: 25 */
/* READ { 0x0000, 0x8612 } -> 0000: e1 */
/* READ { 0x0000, 0x8613 } -> 0000: fa */
/* READ { 0x0000, 0x8614 } -> 0000: f4 */
/* READ { 0x0000, 0x8615 } -> 0000: e8 */
/* READ { 0x0000, 0x8616 } -> 0000: 25 */
{}
};
/* Write one byte to a bridge register via a vendor control request.
 * Returns the usb_control_msg() result (negative errno on failure). */
static int reg_write(struct usb_device *dev,
			u16 index, u16 value)
{
	int rc = usb_control_msg(dev,
			usb_sndctrlpipe(dev, 0),
			0,			/* bRequest */
			USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			value, index, NULL, 0, 500);

	PDEBUG(D_USBO, "reg write i:0x%04x = 0x%02x",
			index, value);
	if (rc < 0)
		pr_err("reg write: error %d\n", rc);
	return rc;
}
/* Read one byte from a bridge register.
 * Returns the byte value (>= 0) on success, a negative errno on failure. */
static int reg_read(struct gspca_dev *gspca_dev,
			u16 index)		/* wIndex */
{
	int rc;

	rc = usb_control_msg(gspca_dev->dev,
			usb_rcvctrlpipe(gspca_dev->dev, 0),
			0,			/* bRequest */
			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			0,			/* wValue */
			index,
			gspca_dev->usb_buf, 1,
			500);			/* timeout in ms */
	PDEBUG(D_USBI, "reg read i:%04x --> %02x",
			index, gspca_dev->usb_buf[0]);
	if (rc >= 0)
		return gspca_dev->usb_buf[0];
	pr_err("reg_read err %d\n", rc);
	return rc;
}
/* Send 1 or 2 bytes to the sensor via the Synchronous Serial Interface.
 * Registers in the 0x10xx range take a 2-byte value (LSB written first).
 * After triggering the transfer, the bridge's busy flag (reg 0x8803) is
 * polled until it clears.
 * Returns 0 on success, a negative errno on failure. */
static int ssi_w(struct gspca_dev *gspca_dev,
		u16 reg, u16 val)
{
	struct usb_device *dev = gspca_dev->dev;
	int ret, retry;

	ret = reg_write(dev, 0x8802, reg >> 8);		/* SSI addr high */
	if (ret < 0)
		goto out;
	ret = reg_write(dev, 0x8801, reg & 0x00ff);	/* SSI addr low */
	if (ret < 0)
		goto out;
	if ((reg & 0xff00) == 0x1000) {		/* if 2 bytes */
		ret = reg_write(dev, 0x8805, val & 0x00ff);	/* LSB */
		if (ret < 0)
			goto out;
		val >>= 8;
	}
	ret = reg_write(dev, 0x8800, val);	/* starts the transfer */
	if (ret < 0)
		goto out;

	/* poll until not busy */
	retry = 10;
	for (;;) {
		ret = reg_read(gspca_dev, 0x8803);
		if (ret < 0)
			break;
		if (gspca_dev->usb_buf[0] == 0)
			break;
		if (--retry <= 0) {
			PDEBUG(D_ERR, "ssi_w busy %02x",
					gspca_dev->usb_buf[0]);
			/* was "ret = -1" (== -EPERM); report a real errno.
			 * Callers only test for < 0, so this is compatible. */
			ret = -EIO;
			break;
		}
		msleep(8);
	}
out:
	return ret;
}
/* Replay a {value, index} register table, terminated by index == 0.
 * Indices with bit 15 set address the bridge directly (0xdd00 being a
 * pseudo-register meaning "delay value ms"); others go to the sensor
 * through ssi_w().  Stops at the first error and returns it, else 0. */
static int write_vector(struct gspca_dev *gspca_dev,
			const u16 (*data)[2])
{
	struct usb_device *dev = gspca_dev->dev;
	int rc = 0;

	for (; (*data)[1] != 0; data++) {
		u16 idx = (*data)[1];
		u16 val = (*data)[0];

		if (!(idx & 0x8000))
			rc = ssi_w(gspca_dev, idx, val);
		else if (idx == 0xdd00)		/* delay pseudo-register */
			msleep(val);
		else
			rc = reg_write(dev, idx, val);
		if (rc < 0)
			break;
	}
	return rc;
}
/* this function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
			const struct usb_device_id *id)
{
	struct sd *sd = (struct sd *) gspca_dev;
	struct cam *cam;
	const u16 (*init_data)[2];
	/* per-subtype init sequences, indexed by id->driver_info */
	static const u16 (*(init_data_tb[]))[2] = {
		spca508_vista_init_data,	/* CreativeVista 0 */
		spca508_sightcam_init_data,	/* HamaUSBSightcam 1 */
		spca508_sightcam2_init_data,	/* HamaUSBSightcam2 2 */
		spca508cs110_init_data,		/* IntelEasyPCCamera 3 */
		spca508cs110_init_data,		/* MicroInnovationIC200 4 */
		spca508_init_data,		/* ViewQuestVQ110 5 */
	};
#ifdef GSPCA_DEBUG
	int data1, data2;

	/* Read from global register the USB product and vendor IDs, just to
	 * prove that we can communicate with the device.  This works, which
	 * confirms that we are communicating properly and that the device
	 * is a 508. */
	data1 = reg_read(gspca_dev, 0x8104);
	data2 = reg_read(gspca_dev, 0x8105);
	PDEBUG(D_PROBE, "Webcam Vendor ID: 0x%02x%02x", data2, data1);

	data1 = reg_read(gspca_dev, 0x8106);
	data2 = reg_read(gspca_dev, 0x8107);
	PDEBUG(D_PROBE, "Webcam Product ID: 0x%02x%02x", data2, data1);

	data1 = reg_read(gspca_dev, 0x8621);
	PDEBUG(D_PROBE, "Window 1 average luminance: %d", data1);
#endif
	cam = &gspca_dev->cam;
	cam->cam_mode = sif_mode;
	cam->nmodes = ARRAY_SIZE(sif_mode);

	sd->subtype = id->driver_info;
	sd->brightness = BRIGHTNESS_DEF;

	/* upload the subtype-specific register/sensor init sequence */
	init_data = init_data_tb[sd->subtype];
	return write_vector(gspca_dev, init_data);
}
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
	/* all hardware setup was done in sd_config(); nothing to redo here */
	return 0;
}
/* Start streaming: program the frame mode, a mode-dependent clock value,
 * then enable Video ISO output (0x10) with Drop Packet (0x20). */
static int sd_start(struct gspca_dev *gspca_dev)
{
	int mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;

	reg_write(gspca_dev->dev, 0x8500, mode);
	if (mode == 0 || mode == 1)
		reg_write(gspca_dev->dev, 0x8700, 0x28);	/* clock */
	else		/* modes 2 and 3 */
		reg_write(gspca_dev->dev, 0x8700, 0x23);	/* clock */
	reg_write(gspca_dev->dev, 0x8112, 0x10 | 0x20);
	return 0;
}
/* Stop streaming. */
static void sd_stopN(struct gspca_dev *gspca_dev)
{
	/* Video ISO disable, Video Drop Packet enable: */
	reg_write(gspca_dev->dev, 0x8112, 0x20);
}
/* Demultiplex isochronous packets into frames.  The first byte of each
 * packet is a marker: 0 starts a new frame, 0xff means "drop packet",
 * anything else is frame data continuing the current frame. */
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *data,			/* isoc packet */
			int len)			/* iso packet length */
{
	u8 marker = data[0];

	if (marker == 0) {			/* start of frame */
		gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
		gspca_frame_add(gspca_dev, FIRST_PACKET,
				data + SPCA508_OFFSET_DATA,
				len - SPCA508_OFFSET_DATA);
	} else if (marker != 0xff) {		/* 0xff: drop packet */
		gspca_frame_add(gspca_dev, INTER_PACKET,
				data + 1, len - 1);
	}
}
/* Push the cached brightness value into the four white-balance gain
 * registers 0x8651..0x8654 (R, Gr, B, Gb). */
static void setbrightness(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	u8 brightness = sd->brightness;
	int i;

	/* MX seem contrast */
	for (i = 0; i < 4; i++)
		reg_write(gspca_dev->dev, 0x8651 + i, brightness);
}
/* V4L2 control: set brightness; applied immediately while streaming. */
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	sd->brightness = val;
	if (gspca_dev->streaming)
		setbrightness(gspca_dev);
	return 0;
}
/* V4L2 control: report the cached brightness value. */
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	*val = sd->brightness;
	return 0;
}
/* sub-driver description: callbacks handed to the gspca framework */
static const struct sd_desc sd_desc = {
	.name = MODULE_NAME,
	.ctrls = sd_ctrls,
	.nctrls = ARRAY_SIZE(sd_ctrls),
	.config = sd_config,		/* at probe time */
	.init = sd_init,		/* at probe and resume */
	.start = sd_start,
	.stopN = sd_stopN,
	.pkt_scan = sd_pkt_scan,
};
/* -- module initialisation -- */
/* driver_info selects the init-data table index used in sd_config() */
static const struct usb_device_id device_table[] = {
	{USB_DEVICE(0x0130, 0x0130), .driver_info = HamaUSBSightcam},
	{USB_DEVICE(0x041e, 0x4018), .driver_info = CreativeVista},
	{USB_DEVICE(0x0733, 0x0110), .driver_info = ViewQuestVQ110},
	{USB_DEVICE(0x0af9, 0x0010), .driver_info = HamaUSBSightcam},
	{USB_DEVICE(0x0af9, 0x0011), .driver_info = HamaUSBSightcam2},
	{USB_DEVICE(0x8086, 0x0110), .driver_info = IntelEasyPCCamera},
	{}
};
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
/* Thin wrapper delegating probe to the gspca core. */
static int sd_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
				THIS_MODULE);
}
/* USB driver registration; suspend/resume handled by the gspca core. */
static struct usb_driver sd_driver = {
	.name = MODULE_NAME,
	.id_table = device_table,
	.probe = sd_probe,
	.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
	.suspend = gspca_suspend,
	.resume = gspca_resume,
#endif
};
module_usb_driver(sd_driver);
| gpl-2.0 |
xsynergy510x/GPE_Kernel | drivers/media/video/gspca/sn9c2028.c | 4956 | 25179 | /*
* SN9C2028 library
*
* Copyright (C) 2009 Theodore Kilgore <kilgota@auburn.edu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MODULE_NAME "sn9c2028"
#include "gspca.h"
MODULE_AUTHOR("Theodore Kilgore");
MODULE_DESCRIPTION("Sonix SN9C2028 USB Camera Driver");
MODULE_LICENSE("GPL");
/* specific webcam descriptor */
struct sd {
	struct gspca_dev gspca_dev;  /* !! must be the first item */
	u8 sof_read;	/* start-of-frame scan state, reset in sd_start() */
	u16 model;	/* USB product id, selects the start sequence */
};
/* one entry of a camera start-up command table */
struct init_command {
	unsigned char instruction[6];	/* raw 6-byte command */
	unsigned char to_read; /* length to read. 0 means no reply requested */
};
/* V4L2 controls supported by the driver (none for this chip) */
static const struct ctrl sd_ctrls[] = {
};
/* How to change the resolution of any of the VGA cams is unknown */
static const struct v4l2_pix_format vga_mode[] = {
	{640, 480, V4L2_PIX_FMT_SN9C2028, V4L2_FIELD_NONE,
		.bytesperline = 640,
		/* compressed stream: 3/4 byte per pixel on average */
		.sizeimage = 640 * 480 * 3 / 4,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 0},
};
/* No way to change the resolution of the CIF cams is known */
static const struct v4l2_pix_format cif_mode[] = {
	{352, 288, V4L2_PIX_FMT_SN9C2028, V4L2_FIELD_NONE,
		.bytesperline = 352,
		/* compressed stream: 3/4 byte per pixel on average */
		.sizeimage = 352 * 288 * 3 / 4,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 0},
};
/* Send one 6-byte command to the camera (copied via gspca_dev->usb_buf).
 * Returns 0 on success, a negative errno on failure. */
static int sn9c2028_command(struct gspca_dev *gspca_dev, u8 *command)
{
	int err;

	PDEBUG(D_USBO, "sending command %02x%02x%02x%02x%02x%02x", command[0],
			command[1], command[2], command[3], command[4], command[5]);

	memcpy(gspca_dev->usb_buf, command, 6);
	err = usb_control_msg(gspca_dev->dev,
			usb_sndctrlpipe(gspca_dev->dev, 0),
			USB_REQ_GET_CONFIGURATION,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			2, 0, gspca_dev->usb_buf, 6, 500);
	if (err >= 0)
		return 0;
	pr_err("command write [%02x] error %d\n",
			gspca_dev->usb_buf[0], err);
	return err;
}
static int sn9c2028_read1(struct gspca_dev *gspca_dev)
{
int rc;
rc = usb_control_msg(gspca_dev->dev,
usb_rcvctrlpipe(gspca_dev->dev, 0),
USB_REQ_GET_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
1, 0, gspca_dev->usb_buf, 1, 500);
if (rc != 1) {
pr_err("read1 error %d\n", rc);
return (rc < 0) ? rc : -EIO;
}
PDEBUG(D_USBI, "read1 response %02x", gspca_dev->usb_buf[0]);
return gspca_dev->usb_buf[0];
}
/* Read a 4-byte response into *reading.
 * Returns 4 on success, or a negative errno / -EIO on short read. */
static int sn9c2028_read4(struct gspca_dev *gspca_dev, u8 *reading)
{
	int err = usb_control_msg(gspca_dev->dev,
			usb_rcvctrlpipe(gspca_dev->dev, 0),
			USB_REQ_GET_STATUS,
			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			4, 0, gspca_dev->usb_buf, 4, 500);

	if (err != 4) {
		pr_err("read4 error %d\n", err);
		return (err < 0) ? err : -EIO;
	}
	memcpy(reading, gspca_dev->usb_buf, 4);
	PDEBUG(D_USBI, "read4 response %02x%02x%02x%02x", reading[0],
			reading[1], reading[2], reading[3]);
	return err;
}
/* Send a command that expects a long (4-byte) reply: send, poll status
 * until it reads 2, fetch the 4-byte payload, then read one trailing
 * status byte.  Returns 0 on success, negative errno on failure. */
static int sn9c2028_long_command(struct gspca_dev *gspca_dev, u8 *command)
{
	int i, status;
	__u8 reading[4];

	status = sn9c2028_command(gspca_dev, command);
	if (status < 0)
		return status;

	/* poll (at most 256 reads) until status reaches 2; any value > 2
	 * also ends the loop and is then rejected below */
	status = -1;
	for (i = 0; i < 256 && status < 2; i++)
		status = sn9c2028_read1(gspca_dev);
	if (status != 2) {
		pr_err("long command status read error %d\n", status);
		return (status < 0) ? status : -EIO;
	}

	memset(reading, 0, 4);
	status = sn9c2028_read4(gspca_dev, reading);
	if (status < 0)
		return status;

	/* in general, the first byte of the response is the first byte of
	 * the command, or'ed with 8 */
	status = sn9c2028_read1(gspca_dev);
	if (status < 0)
		return status;

	return 0;
}
/* Send a command that expects a short (1-byte) acknowledgement.
 * Returns 0 on success, a negative errno on failure. */
static int sn9c2028_short_command(struct gspca_dev *gspca_dev, u8 *command)
{
	int err = sn9c2028_command(gspca_dev, command);

	if (err >= 0)
		err = sn9c2028_read1(gspca_dev);
	return (err < 0) ? err : 0;
}
/* this function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
			const struct usb_device_id *id)
{
	struct sd *sd = (struct sd *) gspca_dev;
	struct cam *cam = &gspca_dev->cam;

	PDEBUG(D_PROBE, "SN9C2028 camera detected (vid/pid 0x%04X:0x%04X)",
	       id->idVendor, id->idProduct);

	sd->model = id->idProduct;

	/* name the known models; the Vivitar also needs flip flags */
	switch (sd->model) {
	case 0x7005:
		PDEBUG(D_PROBE, "Genius Smart 300 camera");
		break;
	case 0x8000:
		PDEBUG(D_PROBE, "DC31VC");
		break;
	case 0x8001:
		PDEBUG(D_PROBE, "Spy camera");
		break;
	case 0x8003:
		PDEBUG(D_PROBE, "CIF camera");
		break;
	case 0x8008:
		PDEBUG(D_PROBE, "Mini-Shotz ms-350 camera");
		break;
	case 0x800a:
		PDEBUG(D_PROBE, "Vivitar 3350b type camera");
		cam->input_flags = V4L2_IN_ST_VFLIP | V4L2_IN_ST_HFLIP;
		break;
	}

	/* models 0x8000/0x8001/0x8003 are CIF, everything else is VGA */
	if (sd->model == 0x8000 || sd->model == 0x8001 ||
	    sd->model == 0x8003) {
		cam->cam_mode = cif_mode;
		cam->nmodes = ARRAY_SIZE(cif_mode);
	} else {
		cam->cam_mode = vga_mode;
		cam->nmodes = ARRAY_SIZE(vga_mode);
	}
	return 0;
}
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
	int status;

	/* the first two reads are discarded — presumably this flushes
	 * stale status bytes (TODO: confirm against hardware traces);
	 * only the third read's result decides success */
	sn9c2028_read1(gspca_dev);
	sn9c2028_read1(gspca_dev);
	status = sn9c2028_read1(gspca_dev);
	return (status < 0) ? status : 0;
}
static int run_start_commands(struct gspca_dev *gspca_dev,
struct init_command *cam_commands, int n)
{
int i, err_code = -1;
for (i = 0; i < n; i++) {
switch (cam_commands[i].to_read) {
case 4:
err_code = sn9c2028_long_command(gspca_dev,
cam_commands[i].instruction);
break;
case 1:
err_code = sn9c2028_short_command(gspca_dev,
cam_commands[i].instruction);
break;
case 0:
err_code = sn9c2028_command(gspca_dev,
cam_commands[i].instruction);
break;
}
if (err_code < 0)
return err_code;
}
return 0;
}
/* Start streaming on the Wild Planet spy cam (model 0x8001).
 * Values marked "observed" come from USB traces of the OEM driver;
 * deviations from those traces are annotated inline. */
static int start_spy_cam(struct gspca_dev *gspca_dev)
{
	struct init_command spy_start_commands[] = {
		{{0x0c, 0x01, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x20, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x21, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x22, 0x01, 0x04, 0x00, 0x00}, 4},
		{{0x13, 0x23, 0x01, 0x03, 0x00, 0x00}, 4},
		{{0x13, 0x24, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x25, 0x01, 0x16, 0x00, 0x00}, 4}, /* width 352 */
		{{0x13, 0x26, 0x01, 0x12, 0x00, 0x00}, 4}, /* height 288 */
		/* {{0x13, 0x27, 0x01, 0x28, 0x00, 0x00}, 4}, */
		{{0x13, 0x27, 0x01, 0x68, 0x00, 0x00}, 4},
		{{0x13, 0x28, 0x01, 0x09, 0x00, 0x00}, 4}, /* red gain ?*/
		/* {{0x13, 0x28, 0x01, 0x00, 0x00, 0x00}, 4}, */
		{{0x13, 0x29, 0x01, 0x00, 0x00, 0x00}, 4},
		/* {{0x13, 0x29, 0x01, 0x0c, 0x00, 0x00}, 4}, */
		{{0x13, 0x2a, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x2b, 0x01, 0x00, 0x00, 0x00}, 4},
		/* {{0x13, 0x2c, 0x01, 0x02, 0x00, 0x00}, 4}, */
		{{0x13, 0x2c, 0x01, 0x02, 0x00, 0x00}, 4},
		{{0x13, 0x2d, 0x01, 0x02, 0x00, 0x00}, 4},
		/* {{0x13, 0x2e, 0x01, 0x09, 0x00, 0x00}, 4}, */
		{{0x13, 0x2e, 0x01, 0x09, 0x00, 0x00}, 4},
		{{0x13, 0x2f, 0x01, 0x07, 0x00, 0x00}, 4},
		{{0x12, 0x34, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x34, 0x01, 0xa1, 0x00, 0x00}, 4},
		{{0x13, 0x35, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x02, 0x06, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x03, 0x13, 0x00, 0x00, 0x00}, 4},	/*don't mess with*/
		/*{{0x11, 0x04, 0x06, 0x00, 0x00, 0x00}, 4}, observed */
		{{0x11, 0x04, 0x00, 0x00, 0x00, 0x00}, 4},	/* brighter */
		/*{{0x11, 0x05, 0x65, 0x00, 0x00, 0x00}, 4}, observed */
		{{0x11, 0x05, 0x00, 0x00, 0x00, 0x00}, 4},	/* brighter */
		{{0x11, 0x06, 0xb1, 0x00, 0x00, 0x00}, 4},	/* observed */
		{{0x11, 0x07, 0x00, 0x00, 0x00, 0x00}, 4},
		/*{{0x11, 0x08, 0x06, 0x00, 0x00, 0x00}, 4}, observed */
		{{0x11, 0x08, 0x0b, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x09, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x0a, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x0b, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x0c, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x0d, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x0e, 0x04, 0x00, 0x00, 0x00}, 4},
		/* {{0x11, 0x0f, 0x00, 0x00, 0x00, 0x00}, 4}, */
		/* brightness or gain. 0 is default. 4 is good
		 * indoors at night with incandescent lighting */
		{{0x11, 0x0f, 0x04, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x10, 0x06, 0x00, 0x00, 0x00}, 4}, /*hstart or hoffs*/
		{{0x11, 0x11, 0x06, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x12, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x14, 0x02, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x13, 0x01, 0x00, 0x00, 0x00}, 4},
		/* {{0x1b, 0x02, 0x06, 0x00, 0x00, 0x00}, 1}, observed */
		{{0x1b, 0x02, 0x11, 0x00, 0x00, 0x00}, 1},	/* brighter */
		/* {{0x1b, 0x13, 0x01, 0x00, 0x00, 0x00}, 1}, observed */
		{{0x1b, 0x13, 0x11, 0x00, 0x00, 0x00}, 1},
		{{0x20, 0x34, 0xa1, 0x00, 0x00, 0x00}, 1},	/* compresses */
		/* Camera should start to capture now. */
	};

	return run_start_commands(gspca_dev, spy_start_commands,
				  ARRAY_SIZE(spy_start_commands));
}
/* Start streaming on the generic CIF cameras (model 0x8003). */
static int start_cif_cam(struct gspca_dev *gspca_dev)
{
	struct init_command cif_start_commands[] = {
		{{0x0c, 0x01, 0x00, 0x00, 0x00, 0x00}, 4},
		/* The entire sequence below seems redundant */
		/* {{0x13, 0x20, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x21, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x22, 0x01, 0x06, 0x00, 0x00}, 4},
		{{0x13, 0x23, 0x01, 0x02, 0x00, 0x00}, 4},
		{{0x13, 0x24, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x25, 0x01, 0x16, 0x00, 0x00}, 4}, width?
		{{0x13, 0x26, 0x01, 0x12, 0x00, 0x00}, 4}, height?
		{{0x13, 0x27, 0x01, 0x68, 0x00, 0x00}, 4}, subsample?
		{{0x13, 0x28, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x29, 0x01, 0x20, 0x00, 0x00}, 4},
		{{0x13, 0x2a, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x2b, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x2c, 0x01, 0x02, 0x00, 0x00}, 4},
		{{0x13, 0x2d, 0x01, 0x03, 0x00, 0x00}, 4},
		{{0x13, 0x2e, 0x01, 0x0f, 0x00, 0x00}, 4},
		{{0x13, 0x2f, 0x01, 0x0c, 0x00, 0x00}, 4},
		{{0x12, 0x34, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x34, 0x01, 0xa1, 0x00, 0x00}, 4},
		{{0x13, 0x35, 0x01, 0x00, 0x00, 0x00}, 4},*/
		{{0x1b, 0x21, 0x00, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x17, 0x00, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x19, 0x00, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x02, 0x06, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x03, 0x5a, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x04, 0x27, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x05, 0x01, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x12, 0x14, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x13, 0x00, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x14, 0x00, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x15, 0x00, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x16, 0x00, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x77, 0xa2, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x06, 0x0f, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x07, 0x14, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x08, 0x0f, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x09, 0x10, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x0e, 0x00, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x0f, 0x00, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x12, 0x07, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x10, 0x1f, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x11, 0x01, 0x00, 0x00, 0x00}, 1},
		{{0x13, 0x25, 0x01, 0x16, 0x00, 0x00}, 1}, /* width/8 */
		{{0x13, 0x26, 0x01, 0x12, 0x00, 0x00}, 1}, /* height/8 */
		/* {{0x13, 0x27, 0x01, 0x68, 0x00, 0x00}, 4}, subsample?
		 * {{0x13, 0x28, 0x01, 0x1e, 0x00, 0x00}, 4}, does nothing
		 * {{0x13, 0x27, 0x01, 0x20, 0x00, 0x00}, 4}, */
		/* {{0x13, 0x29, 0x01, 0x22, 0x00, 0x00}, 4},
		 * causes subsampling
		 * but not a change in the resolution setting! */
		{{0x13, 0x2c, 0x01, 0x02, 0x00, 0x00}, 4},
		{{0x13, 0x2d, 0x01, 0x01, 0x00, 0x00}, 4},
		{{0x13, 0x2e, 0x01, 0x08, 0x00, 0x00}, 4},
		{{0x13, 0x2f, 0x01, 0x06, 0x00, 0x00}, 4},
		{{0x13, 0x28, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x1b, 0x04, 0x6d, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x05, 0x03, 0x00, 0x00, 0x00}, 1},
		{{0x20, 0x36, 0x06, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x0e, 0x01, 0x00, 0x00, 0x00}, 1},
		{{0x12, 0x27, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x1b, 0x0f, 0x00, 0x00, 0x00, 0x00}, 1},
		{{0x20, 0x36, 0x05, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x10, 0x0f, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x02, 0x06, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x11, 0x01, 0x00, 0x00, 0x00}, 1},
		{{0x20, 0x34, 0xa1, 0x00, 0x00, 0x00}, 1},/* use compression */
		/* Camera should start to capture now. */
	};

	return run_start_commands(gspca_dev, cif_start_commands,
				  ARRAY_SIZE(cif_start_commands));
}
/* Start streaming on the Mini-Shotz ms-350 (model 0x8008). */
static int start_ms350_cam(struct gspca_dev *gspca_dev)
{
	struct init_command ms350_start_commands[] = {
		{{0x0c, 0x01, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x16, 0x01, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x20, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x21, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x22, 0x01, 0x04, 0x00, 0x00}, 4},
		{{0x13, 0x23, 0x01, 0x03, 0x00, 0x00}, 4},
		{{0x13, 0x24, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x25, 0x01, 0x16, 0x00, 0x00}, 4},
		{{0x13, 0x26, 0x01, 0x12, 0x00, 0x00}, 4},
		{{0x13, 0x27, 0x01, 0x28, 0x00, 0x00}, 4},
		{{0x13, 0x28, 0x01, 0x09, 0x00, 0x00}, 4},
		{{0x13, 0x29, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x2a, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x2b, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x2c, 0x01, 0x02, 0x00, 0x00}, 4},
		{{0x13, 0x2d, 0x01, 0x03, 0x00, 0x00}, 4},
		{{0x13, 0x2e, 0x01, 0x0f, 0x00, 0x00}, 4},
		{{0x13, 0x2f, 0x01, 0x0c, 0x00, 0x00}, 4},
		{{0x12, 0x34, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x34, 0x01, 0xa1, 0x00, 0x00}, 4},
		{{0x13, 0x35, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x00, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x01, 0x70, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x02, 0x05, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x03, 0x5d, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x04, 0x07, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x05, 0x25, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x06, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x07, 0x09, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x08, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x09, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x0b, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x0c, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x0d, 0x0c, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x0e, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x0f, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x10, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x11, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x12, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x13, 0x63, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x15, 0x70, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x18, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x11, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x25, 0x01, 0x28, 0x00, 0x00}, 4}, /* width */
		{{0x13, 0x26, 0x01, 0x1e, 0x00, 0x00}, 4}, /* height */
		{{0x13, 0x28, 0x01, 0x09, 0x00, 0x00}, 4}, /* vstart? */
		{{0x13, 0x27, 0x01, 0x28, 0x00, 0x00}, 4},
		{{0x13, 0x29, 0x01, 0x40, 0x00, 0x00}, 4}, /* hstart? */
		{{0x13, 0x2c, 0x01, 0x02, 0x00, 0x00}, 4},
		{{0x13, 0x2d, 0x01, 0x03, 0x00, 0x00}, 4},
		{{0x13, 0x2e, 0x01, 0x0f, 0x00, 0x00}, 4},
		{{0x13, 0x2f, 0x01, 0x0c, 0x00, 0x00}, 4},
		{{0x1b, 0x02, 0x05, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x11, 0x01, 0x00, 0x00, 0x00}, 1},
		{{0x20, 0x18, 0x00, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x02, 0x0a, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x11, 0x01, 0x00, 0x00, 0x00}, 0},
		/* Camera should start to capture now. */
	};

	return run_start_commands(gspca_dev, ms350_start_commands,
				  ARRAY_SIZE(ms350_start_commands));
}
/* Start streaming on the Genius Smart 300 (model 0x7005). */
static int start_genius_cam(struct gspca_dev *gspca_dev)
{
	struct init_command genius_start_commands[] = {
		{{0x0c, 0x01, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x16, 0x01, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x10, 0x00, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x25, 0x01, 0x16, 0x00, 0x00}, 4},
		{{0x13, 0x26, 0x01, 0x12, 0x00, 0x00}, 4},
		/* "preliminary" width and height settings */
		{{0x13, 0x28, 0x01, 0x0e, 0x00, 0x00}, 4},
		{{0x13, 0x27, 0x01, 0x20, 0x00, 0x00}, 4},
		{{0x13, 0x29, 0x01, 0x22, 0x00, 0x00}, 4},
		{{0x13, 0x2c, 0x01, 0x02, 0x00, 0x00}, 4},
		{{0x13, 0x2d, 0x01, 0x02, 0x00, 0x00}, 4},
		{{0x13, 0x2e, 0x01, 0x09, 0x00, 0x00}, 4},
		{{0x13, 0x2f, 0x01, 0x07, 0x00, 0x00}, 4},
		{{0x11, 0x20, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x21, 0x2d, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x22, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x23, 0x03, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x10, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x11, 0x64, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x12, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x13, 0x91, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x14, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x15, 0x20, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x16, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x17, 0x60, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x20, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x21, 0x2d, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x22, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x23, 0x03, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x25, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x26, 0x02, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x27, 0x88, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x30, 0x38, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x31, 0x2a, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x32, 0x2a, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x33, 0x2a, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x34, 0x02, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x5b, 0x0a, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x25, 0x01, 0x28, 0x00, 0x00}, 4}, /* real width */
		{{0x13, 0x26, 0x01, 0x1e, 0x00, 0x00}, 4}, /* real height */
		{{0x13, 0x28, 0x01, 0x0e, 0x00, 0x00}, 4},
		{{0x13, 0x27, 0x01, 0x20, 0x00, 0x00}, 4},
		{{0x13, 0x29, 0x01, 0x62, 0x00, 0x00}, 4},
		{{0x13, 0x2c, 0x01, 0x02, 0x00, 0x00}, 4},
		{{0x13, 0x2d, 0x01, 0x03, 0x00, 0x00}, 4},
		{{0x13, 0x2e, 0x01, 0x0f, 0x00, 0x00}, 4},
		{{0x13, 0x2f, 0x01, 0x0c, 0x00, 0x00}, 4},
		{{0x11, 0x20, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x21, 0x2a, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x22, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x23, 0x28, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x10, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x11, 0x04, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x12, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x13, 0x03, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x14, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x15, 0xe0, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x16, 0x02, 0x00, 0x00, 0x00}, 4},
		{{0x11, 0x17, 0x80, 0x00, 0x00, 0x00}, 4},
		{{0x1c, 0x20, 0x00, 0x2a, 0x00, 0x00}, 1},
		{{0x1c, 0x20, 0x00, 0x2a, 0x00, 0x00}, 1},
		{{0x20, 0x34, 0xa1, 0x00, 0x00, 0x00}, 0}
		/* Camera should start to capture now. */
	};

	return run_start_commands(gspca_dev, genius_start_commands,
				  ARRAY_SIZE(genius_start_commands));
}
/* Start streaming on the Vivitar Vivicam 3350b (model 0x800a). */
static int start_vivitar_cam(struct gspca_dev *gspca_dev)
{
	struct init_command vivitar_start_commands[] = {
		{{0x0c, 0x01, 0x00, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x20, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x21, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x22, 0x01, 0x01, 0x00, 0x00}, 4},
		{{0x13, 0x23, 0x01, 0x01, 0x00, 0x00}, 4},
		{{0x13, 0x24, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x25, 0x01, 0x28, 0x00, 0x00}, 4},
		{{0x13, 0x26, 0x01, 0x1e, 0x00, 0x00}, 4},
		{{0x13, 0x27, 0x01, 0x20, 0x00, 0x00}, 4},
		{{0x13, 0x28, 0x01, 0x0a, 0x00, 0x00}, 4},
		/*
		 * Above is changed from OEM 0x0b. Fixes Bayer tiling.
		 * Presumably gives a vertical shift of one row.
		 */
		{{0x13, 0x29, 0x01, 0x20, 0x00, 0x00}, 4},
		/* Above seems to do horizontal shift. */
		{{0x13, 0x2a, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x2b, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x2c, 0x01, 0x02, 0x00, 0x00}, 4},
		{{0x13, 0x2d, 0x01, 0x03, 0x00, 0x00}, 4},
		{{0x13, 0x2e, 0x01, 0x0f, 0x00, 0x00}, 4},
		{{0x13, 0x2f, 0x01, 0x0c, 0x00, 0x00}, 4},
		/* Above three commands seem to relate to brightness. */
		{{0x12, 0x34, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x13, 0x34, 0x01, 0xa1, 0x00, 0x00}, 4},
		{{0x13, 0x35, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x1b, 0x12, 0x80, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x01, 0x77, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x02, 0x3a, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x12, 0x78, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x13, 0x00, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x14, 0x80, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x15, 0x34, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x1b, 0x04, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x20, 0x44, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x23, 0xee, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x26, 0xa0, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x27, 0x9a, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x28, 0xa0, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x29, 0x30, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x2a, 0x80, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x2b, 0x00, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x2f, 0x3d, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x30, 0x24, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x32, 0x86, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x60, 0xa9, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x61, 0x42, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x65, 0x00, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x69, 0x38, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x6f, 0x88, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x70, 0x0b, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x71, 0x00, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x74, 0x21, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x75, 0x86, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x76, 0x00, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x7d, 0xf3, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x17, 0x1c, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x18, 0xc0, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x19, 0x05, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x1a, 0xf6, 0x00, 0x00, 0x00}, 1},
		/* {{0x13, 0x25, 0x01, 0x28, 0x00, 0x00}, 4},
		{{0x13, 0x26, 0x01, 0x1e, 0x00, 0x00}, 4},
		{{0x13, 0x28, 0x01, 0x0b, 0x00, 0x00}, 4}, */
		{{0x20, 0x36, 0x06, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x10, 0x26, 0x00, 0x00, 0x00}, 1},
		{{0x12, 0x27, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x1b, 0x76, 0x03, 0x00, 0x00, 0x00}, 1},
		{{0x20, 0x36, 0x05, 0x00, 0x00, 0x00}, 1},
		{{0x1b, 0x00, 0x3f, 0x00, 0x00, 0x00}, 1},
		/* Above is brightness; OEM driver setting is 0x10 */
		{{0x12, 0x27, 0x01, 0x00, 0x00, 0x00}, 4},
		{{0x20, 0x29, 0x30, 0x00, 0x00, 0x00}, 1},
		{{0x20, 0x34, 0xa1, 0x00, 0x00, 0x00}, 1}
	};

	return run_start_commands(gspca_dev, vivitar_start_commands,
				  ARRAY_SIZE(vivitar_start_commands));
}
/* Start streaming: reset the sof scanner state, then run the start
 * sequence matching the detected model. */
static int sd_start(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	sd->sof_read = 0;

	switch (sd->model) {
	case 0x7005:
		return start_genius_cam(gspca_dev);
	case 0x8001:
		return start_spy_cam(gspca_dev);
	case 0x8003:
		return start_cif_cam(gspca_dev);
	case 0x8008:
		return start_ms350_cam(gspca_dev);
	case 0x800a:
		return start_vivitar_cam(gspca_dev);
	}
	pr_err("Starting unknown camera, please report this\n");
	return -ENXIO;
}
/* Stop streaming: drain one status byte, then issue the 0x14 stop
 * command.  Errors are only logged; there is nothing to recover. */
static void sd_stopN(struct gspca_dev *gspca_dev)
{
	__u8 stop_cmd[6] = {0x14, 0x00, 0x00, 0x00, 0x00, 0x00};

	if (sn9c2028_read1(gspca_dev) < 0)
		PDEBUG(D_ERR, "Camera Stop read failed");

	if (sn9c2028_command(gspca_dev, stop_cmd) < 0)
		PDEBUG(D_ERR, "Camera Stop command failed");
}
/* Include sn9c2028 sof detection functions */
#include "sn9c2028.h"
/* Split the isochronous stream into frames at start-of-frame markers.
 * sn9c2028_find_sof() comes from sn9c2028.h; it appears to return a
 * pointer just past a detected marker (NOTE(review): confirm against
 * the header — the "n > sizeof" adjustment below relies on it). */
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
			__u8 *data,			/* isoc packet */
			int len)			/* iso packet length */
{
	unsigned char *sof;

	sof = sn9c2028_find_sof(gspca_dev, data, len);
	if (sof) {
		int n;

		/* finish decoding current frame */
		n = sof - data;
		/* exclude the marker bytes themselves from the frame,
		 * unless the marker straddled the previous packet */
		if (n > sizeof sn9c2028_sof_marker)
			n -= sizeof sn9c2028_sof_marker;
		else
			n = 0;
		gspca_frame_add(gspca_dev, LAST_PACKET, data, n);
		/* Start next frame. */
		gspca_frame_add(gspca_dev, FIRST_PACKET,
			sn9c2028_sof_marker, sizeof sn9c2028_sof_marker);
		len -= sof - data;
		data = sof;
	}
	gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
/* sub-driver description: callbacks handed to the gspca framework */
static const struct sd_desc sd_desc = {
	.name = MODULE_NAME,
	.ctrls = sd_ctrls,
	.nctrls = ARRAY_SIZE(sd_ctrls),
	.config = sd_config,		/* at probe time */
	.init = sd_init,		/* at probe and resume */
	.start = sd_start,
	.stopN = sd_stopN,
	.pkt_scan = sd_pkt_scan,
};
/* -- module initialisation -- */
/* the product id is also used in sd_config()/sd_start() as the model */
static const struct usb_device_id device_table[] = {
	{USB_DEVICE(0x0458, 0x7005)}, /* Genius Smart 300, version 2 */
	/* The Genius Smart is untested. I can't find an owner ! */
	/* {USB_DEVICE(0x0c45, 0x8000)}, DC31VC, Don't know this camera */
	{USB_DEVICE(0x0c45, 0x8001)}, /* Wild Planet digital spy cam */
	{USB_DEVICE(0x0c45, 0x8003)}, /* Several small CIF cameras */
	/* {USB_DEVICE(0x0c45, 0x8006)}, Unknown VGA camera */
	{USB_DEVICE(0x0c45, 0x8008)}, /* Mini-Shotz ms-350 */
	{USB_DEVICE(0x0c45, 0x800a)}, /* Vivicam 3350B */
	{}
};
MODULE_DEVICE_TABLE(usb, device_table);
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
/* Thin wrapper delegating probe to the gspca core. */
static int sd_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
				THIS_MODULE);
}
/* USB driver registration; suspend/resume handled by the gspca core. */
static struct usb_driver sd_driver = {
	.name = MODULE_NAME,
	.id_table = device_table,
	.probe = sd_probe,
	.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
	.suspend = gspca_suspend,
	.resume = gspca_resume,
#endif
};
module_usb_driver(sd_driver);
| gpl-2.0 |
qtekfun/kernel_htc_msm8939 | lib/klist.c | 5468 | 9566 | /*
* klist.c - Routines for manipulating klists.
*
* Copyright (C) 2005 Patrick Mochel
*
* This file is released under the GPL v2.
*
* This klist interface provides a couple of structures that wrap around
* struct list_head to provide explicit list "head" (struct klist) and list
* "node" (struct klist_node) objects. For struct klist, a spinlock is
* included that protects access to the actual list itself. struct
* klist_node provides a pointer to the klist that owns it and a kref
* reference count that indicates the number of current users of that node
* in the list.
*
* The entire point is to provide an interface for iterating over a list
* that is safe and allows for modification of the list during the
* iteration (e.g. insertion and removal), including modification of the
* current node on the list.
*
* It works using a 3rd object type - struct klist_iter - that is declared
* and initialized before an iteration. klist_next() is used to acquire the
* next element in the list. It returns NULL if there are no more items.
* Internally, that routine takes the klist's lock, decrements the
* reference count of the previous klist_node and increments the count of
* the next klist_node. It then drops the lock and returns.
*
* There are primitives for adding and removing nodes to/from a klist.
* When deleting, klist_del() will simply decrement the reference count.
* Only when the count goes to 0 is the node removed from the list.
* klist_remove() will try to delete the node from the list and block until
* it is actually removed. This is useful for objects (like devices) that
* have been removed from the system and must be freed (but must wait until
* all accessors have finished).
*/
#include <linux/klist.h>
#include <linux/export.h>
#include <linux/sched.h>
/*
 * Use the lowest bit of n_klist to mark deleted nodes and exclude
 * dead ones from iteration. This works because a struct klist is at
 * least word aligned, so the low bit of a real pointer is always 0.
 */
#define KNODE_DEAD		1LU
#define KNODE_KLIST_MASK	~KNODE_DEAD
/* Recover the owning klist from a node, masking off the KNODE_DEAD tag
 * bit that may be stored in the low bit of n_klist. */
static struct klist *knode_klist(struct klist_node *knode)
{
	unsigned long raw = (unsigned long)knode->n_klist;

	return (struct klist *)(raw & KNODE_KLIST_MASK);
}
/* True if the node has been marked dead (low tag bit set in n_klist). */
static bool knode_dead(struct klist_node *knode)
{
	unsigned long raw = (unsigned long)knode->n_klist;

	return (raw & KNODE_DEAD) != 0;
}
/* Point the node at its owning klist (or NULL when detaching). Because
 * klist pointers are at least word aligned the low bit is clear, so this
 * assignment implicitly clears any KNODE_DEAD mark as well. */
static void knode_set_klist(struct klist_node *knode, struct klist *klist)
{
	knode->n_klist = klist;
	/* no knode deserves to start its life dead */
	WARN_ON(knode_dead(knode));
}
/* Mark a node dead by setting the low tag bit of n_klist. Dead nodes are
 * skipped by klist_next() and are unlinked once their refcount reaches
 * zero (see klist_release()). */
static void knode_kill(struct klist_node *knode)
{
	/* and no knode should die twice ever either, see we're very humane */
	WARN_ON(knode_dead(knode));
	*(unsigned long *)&knode->n_klist |= KNODE_DEAD;
}
/**
 * klist_init - Initialize a klist structure.
 * @k: The klist we're initializing.
 * @get: The get function for the embedding object (NULL if none)
 * @put: The put function for the embedding object (NULL if none)
 *
 * Sets up the list head and the spinlock guarding it. When the
 * klist_node structures are embedded in refcounted objects (necessary
 * for safe deletion), @get and @put take and release references on the
 * embedding objects.
 */
void klist_init(struct klist *k, void (*get)(struct klist_node *),
		void (*put)(struct klist_node *))
{
	k->get = get;
	k->put = put;
	INIT_LIST_HEAD(&k->k_list);
	spin_lock_init(&k->k_lock);
}
EXPORT_SYMBOL_GPL(klist_init);
static void add_head(struct klist *k, struct klist_node *n)
{
spin_lock(&k->k_lock);
list_add(&n->n_node, &k->k_list);
spin_unlock(&k->k_lock);
}
static void add_tail(struct klist *k, struct klist_node *n)
{
spin_lock(&k->k_lock);
list_add_tail(&n->n_node, &k->k_list);
spin_unlock(&k->k_lock);
}
/* Prepare @n for membership in @k and, if the klist has a get function,
 * take a reference on the embedding object. */
static void klist_node_init(struct klist *k, struct klist_node *n)
{
	void (*get)(struct klist_node *) = k->get;

	INIT_LIST_HEAD(&n->n_node);
	kref_init(&n->n_ref);
	knode_set_klist(n, k);
	if (get)
		get(n);
}
/**
 * klist_add_head - Initialize a klist_node and add it to front.
 * @n: node we're adding.
 * @k: klist it's going on.
 *
 * Takes a reference on the embedding object via klist_node_init();
 * the reference is dropped by klist_del()/klist_remove().
 */
void klist_add_head(struct klist_node *n, struct klist *k)
{
	klist_node_init(k, n);
	add_head(k, n);
}
EXPORT_SYMBOL_GPL(klist_add_head);
/**
 * klist_add_tail - Initialize a klist_node and add it to back.
 * @n: node we're adding.
 * @k: klist it's going on.
 *
 * Takes a reference on the embedding object via klist_node_init();
 * the reference is dropped by klist_del()/klist_remove().
 */
void klist_add_tail(struct klist_node *n, struct klist *k)
{
	klist_node_init(k, n);
	add_tail(k, n);
}
EXPORT_SYMBOL_GPL(klist_add_tail);
/**
 * klist_add_after - Init a klist_node and add it after an existing node
 * @n: node we're adding.
 * @pos: node to put @n after
 *
 * @n joins the same klist that owns @pos.
 */
void klist_add_after(struct klist_node *n, struct klist_node *pos)
{
	struct klist *owner = knode_klist(pos);

	klist_node_init(owner, n);
	spin_lock(&owner->k_lock);
	list_add(&n->n_node, &pos->n_node);
	spin_unlock(&owner->k_lock);
}
EXPORT_SYMBOL_GPL(klist_add_after);
/**
 * klist_add_before - Init a klist_node and add it before an existing node
 * @n: node we're adding.
 * @pos: node to put @n before
 *
 * @n joins the same klist that owns @pos.
 */
void klist_add_before(struct klist_node *n, struct klist_node *pos)
{
	struct klist *k = knode_klist(pos);

	klist_node_init(k, n);
	spin_lock(&k->k_lock);
	/* list_add_tail relative to @pos inserts @n immediately before it. */
	list_add_tail(&n->n_node, &pos->n_node);
	spin_unlock(&k->k_lock);
}
EXPORT_SYMBOL_GPL(klist_add_before);
/* Bookkeeping for klist_remove(): each caller registers a waiter and
 * sleeps until klist_release() drops the node's last reference and
 * wakes it. */
struct klist_waiter {
	struct list_head list;		/* on klist_remove_waiters */
	struct klist_node *node;	/* node being waited on */
	struct task_struct *process;	/* sleeping task to wake */
	int woken;			/* set before wake_up_process() */
};

static DEFINE_SPINLOCK(klist_remove_lock);
static LIST_HEAD(klist_remove_waiters);
/* kref release callback, invoked when the final reference on a (dead)
 * node is dropped. Unlinks the node, wakes every klist_remove() waiter
 * registered for it, and detaches it from its klist. Runs with the
 * klist's k_lock held (called via kref_put in klist_dec_and_del). */
static void klist_release(struct kref *kref)
{
	struct klist_waiter *waiter, *tmp;
	struct klist_node *n = container_of(kref, struct klist_node, n_ref);

	WARN_ON(!knode_dead(n));
	list_del(&n->n_node);
	spin_lock(&klist_remove_lock);
	list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) {
		if (waiter->node != n)
			continue;
		list_del(&waiter->list);
		waiter->woken = 1;
		/* make woken visible before the wakeup */
		mb();
		wake_up_process(waiter->process);
	}
	spin_unlock(&klist_remove_lock);
	knode_set_klist(n, NULL);
}
/* Drop one reference on @n. Returns nonzero if that was the final
 * reference and klist_release() ran. */
static int klist_dec_and_del(struct klist_node *n)
{
	struct kref *ref = &n->n_ref;

	return kref_put(ref, klist_release);
}
/* Drop a reference on @n under the list lock, optionally marking the
 * node dead first (@kill, used by klist_del). The embedding object's
 * put callback is only invoked if this dropped the final reference,
 * and is called after the lock is released since it may free or sleep. */
static void klist_put(struct klist_node *n, bool kill)
{
	struct klist *k = knode_klist(n);
	void (*put)(struct klist_node *) = k->put;

	spin_lock(&k->k_lock);
	if (kill)
		knode_kill(n);
	/* klist_dec_and_del() returns 0 if references remain. */
	if (!klist_dec_and_del(n))
		put = NULL;
	spin_unlock(&k->k_lock);
	if (put)
		put(n);
}
/**
 * klist_del - Decrement the reference count of node and try to remove.
 * @n: node we're deleting.
 *
 * Marks the node dead and drops one reference; the node is only
 * physically unlinked once its refcount reaches zero.
 */
void klist_del(struct klist_node *n)
{
	klist_put(n, true);
}
EXPORT_SYMBOL_GPL(klist_del);
/**
 * klist_remove - Decrement the refcount of node and wait for it to go away.
 * @n: node we're removing.
 *
 * Blocks (uninterruptibly) until the node's final reference is dropped
 * and klist_release() has unlinked it. May sleep.
 */
void klist_remove(struct klist_node *n)
{
	struct klist_waiter waiter;

	/* Register the waiter before dropping our reference so that
	 * klist_release() can always find and wake us. */
	waiter.node = n;
	waiter.process = current;
	waiter.woken = 0;
	spin_lock(&klist_remove_lock);
	list_add(&waiter.list, &klist_remove_waiters);
	spin_unlock(&klist_remove_lock);

	klist_del(n);

	/* Sleep until klist_release() sets waiter.woken (it issues a
	 * barrier before the wakeup). */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (waiter.woken)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL_GPL(klist_remove);
/**
 * klist_node_attached - Say whether a node is bound to a list or not.
 * @n: Node that we're testing.
 *
 * Returns nonzero while the node belongs to a klist; klist_release()
 * clears n_klist once the node is finally detached.
 */
int klist_node_attached(struct klist_node *n)
{
	return n->n_klist != NULL;
}
EXPORT_SYMBOL_GPL(klist_node_attached);
/**
 * klist_iter_init_node - Initialize a klist_iter structure.
 * @k: klist we're iterating.
 * @i: klist_iter we're filling.
 * @n: node to start with.
 *
 * Similar to klist_iter_init(), but starts the action off with @n,
 * instead of with the list head. Takes a reference on @n (if non-NULL)
 * that klist_next()/klist_iter_exit() will drop.
 */
void klist_iter_init_node(struct klist *k, struct klist_iter *i,
			  struct klist_node *n)
{
	if (n)
		kref_get(&n->n_ref);
	i->i_klist = k;
	i->i_cur = n;
}
EXPORT_SYMBOL_GPL(klist_iter_init_node);
/**
 * klist_iter_init - Initialize a klist_iter structure.
 * @k: klist we're iterating.
 * @i: klist_iter structure we're filling.
 *
 * Similar to klist_iter_init_node(), but start with the list head.
 */
void klist_iter_init(struct klist *k, struct klist_iter *i)
{
	klist_iter_init_node(k, i, NULL);
}
EXPORT_SYMBOL_GPL(klist_iter_init);
/**
 * klist_iter_exit - Finish a list iteration.
 * @i: Iterator structure.
 *
 * Must be called when done iterating over list, as it drops the
 * reference held on the current node. Necessary in case iteration
 * exited before the end of the list was reached, and always good form.
 */
void klist_iter_exit(struct klist_iter *i)
{
	struct klist_node *cur = i->i_cur;

	if (!cur)
		return;
	i->i_cur = NULL;
	klist_put(cur, false);
}
EXPORT_SYMBOL_GPL(klist_iter_exit);
/* Map a raw list_head back to its containing klist_node. */
static struct klist_node *to_klist_node(struct list_head *n)
{
	return container_of(n, struct klist_node, n_node);
}
/**
 * klist_next - Ante up next node in list.
 * @i: Iterator structure.
 *
 * First grab list lock. Decrement the reference count of the previous
 * node, if there was one. Grab the next node, increment its reference
 * count, drop the lock, and return that next node. Returns NULL when
 * the end of the list is reached.
 */
struct klist_node *klist_next(struct klist_iter *i)
{
	void (*put)(struct klist_node *) = i->i_klist->put;
	struct klist_node *last = i->i_cur;
	struct klist_node *next;

	spin_lock(&i->i_klist->k_lock);

	if (last) {
		next = to_klist_node(last->n_node.next);
		/* Only call the embedding object's put if dropping our
		 * reference actually released the node. */
		if (!klist_dec_and_del(last))
			put = NULL;
	} else
		next = to_klist_node(i->i_klist->k_list.next);

	i->i_cur = NULL;
	/* Skip nodes that have been marked dead by klist_del(); they stay
	 * linked until their refcount drops but must not be returned. */
	while (next != to_klist_node(&i->i_klist->k_list)) {
		if (likely(!knode_dead(next))) {
			kref_get(&next->n_ref);
			i->i_cur = next;
			break;
		}
		next = to_klist_node(next->n_node.next);
	}

	spin_unlock(&i->i_klist->k_lock);

	/* Invoke put outside the lock; it may sleep or free the object. */
	if (put && last)
		put(last);
	return i->i_cur;
}
EXPORT_SYMBOL_GPL(klist_next);
| gpl-2.0 |
Evervolv/android_kernel_lge_msm8974 | arch/tile/lib/strlen_64.c | 7260 | 1165 | /*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
#undef strlen

/* Word-at-a-time strlen for TILE-Gx. Processes 8 aligned bytes per
 * iteration; reading a full aligned word around the string start is safe
 * because an aligned 8-byte load cannot cross a page boundary. */
size_t strlen(const char *s)
{
	/* Get an aligned pointer. */
	const uintptr_t s_int = (uintptr_t) s;
	const uint64_t *p = (const uint64_t *)(s_int & -8);

	/* Read the first word, but force bytes before the string to be nonzero.
	 * This expression works because we know shift counts are taken mod 64.
	 */
	uint64_t v = *p | ((1ULL << (s_int << 3)) - 1);

	uint64_t bits;

	/* __insn_v1cmpeqi(v, 0) yields a nonzero mask when some byte of v
	 * is NUL; scan word by word until then. */
	while ((bits = __insn_v1cmpeqi(v, 0)) == 0)
		v = *++p;

	/* ctz finds the first matching bit; >> 3 converts it to a byte
	 * offset within the word. */
	return ((const char *)p) + (__insn_ctz(bits) >> 3) - s;
}
EXPORT_SYMBOL(strlen);
| gpl-2.0 |
flar2/m8-Sense-4.4.4 | fs/xfs/kmem.c | 8028 | 2932 | /*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include "time.h"
#include "kmem.h"
#include "xfs_message.h"
/*
 * Greedy allocation. May fail and may return vmalloced memory.
 *
 * Must be freed using kmem_free_large.
 *
 * Tries progressively smaller sizes from @maxsize down to @minsize and
 * stores the size actually obtained in *@size on success. Returns NULL
 * if even the @minsize allocation fails.
 */
void *
kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
{
	void		*ptr;
	size_t		kmsize = maxsize;

	while (!(ptr = kmem_zalloc_large(kmsize))) {
		/*
		 * Stop once the minimum size has been attempted and failed;
		 * the original loop spun forever here, contradicting the
		 * "May fail" contract and leaving the if (ptr) test below
		 * unreachable as dead code.
		 */
		if (kmsize == minsize)
			break;
		if ((kmsize >>= 1) <= minsize)
			kmsize = minsize;
	}
	if (ptr)
		*size = kmsize;
	return ptr;
}
/*
 * kmalloc wrapper honoring XFS KM_* flags. Unless KM_MAYFAIL or
 * KM_NOSLEEP is set, retries forever, warning every 100 attempts and
 * backing off while the block device is congested.
 */
void *
kmem_alloc(size_t size, unsigned int __nocast flags)
{
	gfp_t		lflags = kmem_flags_convert(flags);
	int		retries = 0;

	for (;;) {
		void	*ptr = kmalloc(size, lflags);

		if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
			return ptr;
		if (!(++retries % 100))
			xfs_err(NULL,
		"possible memory allocation deadlock in %s (mode:0x%x)",
					__func__, lflags);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
	}
}
/*
 * Zeroing variant of kmem_alloc(). Same retry/flag semantics.
 *
 * The old code zeroed via memset((char *)ptr, 0, (int)size); the (int)
 * cast silently truncates sizes larger than INT_MAX, which would leave
 * the tail of such an allocation uninitialized. Pass the size_t through
 * unmodified instead.
 */
void *
kmem_zalloc(size_t size, unsigned int __nocast flags)
{
	void	*ptr;

	ptr = kmem_alloc(size, flags);
	if (ptr)
		memset(ptr, 0, size);
	return ptr;
}
/*
 * Free memory obtained from kmem_alloc()/kmem_zalloc_large(), choosing
 * vfree() or kfree() based on where the pointer lives.
 */
void
kmem_free(const void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vfree(ptr);
	else
		kfree(ptr);
}
/*
 * Reallocate to @newsize, copying min(@oldsize, @newsize) bytes from the
 * old buffer. The old buffer is always freed when @ptr is non-NULL, even
 * if the new allocation failed.
 */
void *
kmem_realloc(const void *ptr, size_t newsize, size_t oldsize,
	     unsigned int __nocast flags)
{
	void	*new = kmem_alloc(newsize, flags);

	if (ptr) {
		if (new) {
			size_t	ncopy = oldsize < newsize ? oldsize : newsize;

			memcpy(new, ptr, ncopy);
		}
		kmem_free(ptr);
	}
	return new;
}
/*
 * Slab-cache allocation honoring XFS KM_* flags; same retry behavior as
 * kmem_alloc().
 */
void *
kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
{
	gfp_t		lflags = kmem_flags_convert(flags);
	int		retries = 0;

	for (;;) {
		void	*ptr = kmem_cache_alloc(zone, lflags);

		if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
			return ptr;
		if (!(++retries % 100))
			xfs_err(NULL,
		"possible memory allocation deadlock in %s (mode:0x%x)",
					__func__, lflags);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
	}
}
/*
 * Zeroing variant of kmem_zone_alloc(); clears the whole object size of
 * the cache.
 */
void *
kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags)
{
	void	*ptr = kmem_zone_alloc(zone, flags);

	if (ptr)
		memset(ptr, 0, kmem_cache_size(zone));
	return ptr;
}
| gpl-2.0 |
Rashed97/android_kernel_samsung_lt03lte | arch/arm/mach-msm/mdm_common.c | 349 | 33014 | /* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/ioctl.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/clk.h>
#include <linux/mfd/pmic8058.h>
#include <linux/msm_charm.h>
#include <asm/mach-types.h>
#include <asm/uaccess.h>
#include <mach/mdm2.h>
#include <mach/restart.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/rpm.h>
#include <mach/gpiomux.h>
#include "msm_watchdog.h"
#include "mdm_private.h"
#include "sysmon.h"
#ifdef CONFIG_SEC_DEBUG
#include <mach/sec_debug.h>
#endif
/* Panic-path poll budget for MDM2AP_STATUS going low, in ms. */
#define MDM_MODEM_TIMEOUT	6000
#define MDM_MODEM_DELTA	100
#define MDM_BOOT_TIMEOUT	60000L
#define MDM_RDUMP_TIMEOUT	120000L
#define MDM2AP_STATUS_TIMEOUT_MS	60000L

/* Allow a maximum device id of this many digits */
#define MAX_DEVICE_DIGITS	10
#define EXTERNAL_MODEM "external_modem"
#define SUBSYS_NAME_LENGTH \
	(sizeof(EXTERNAL_MODEM) + MAX_DEVICE_DIGITS)
#define DEVICE_BASE_NAME "mdm"
#define DEVICE_NAME_LENGTH \
	(sizeof(DEVICE_BASE_NAME) + MAX_DEVICE_DIGITS)

/* Buffer size / retry policy for fetching the modem's restart reason. */
#define RD_BUF_SIZE 100
#define SFR_MAX_RETRIES 10
#define SFR_RETRY_INTERVAL 1000

/* Selects which gpiomux configuration to apply to MDM2AP_STATUS. */
enum gpio_update_config {
	GPIO_UPDATE_BOOTING_CONFIG = 1,
	GPIO_UPDATE_RUNNING_CONFIG,
};

/* Per-modem instance state; one of these exists for every external
 * modem probed by this driver. */
struct mdm_device {
	struct list_head link;		/* on the global mdm_devices list */
	struct mdm_modem_drv mdm_data;

	/* Saved MDM2AP_STATUS gpiomux setting, restored at reboot time. */
	int mdm2ap_status_valid_old_config;
	struct gpiomux_setting mdm2ap_status_old_config;
	int first_boot;
	struct workqueue_struct *mdm_queue;
	struct workqueue_struct *mdm_sfr_queue;
	unsigned int dump_timeout_ms;

	char subsys_name[SUBSYS_NAME_LENGTH];
	struct subsys_desc mdm_subsys;
	struct subsys_device *mdm_subsys_dev;

	char device_name[DEVICE_NAME_LENGTH];
	struct miscdevice misc_device;

	struct completion mdm_needs_reload;
	struct completion mdm_boot;
	struct completion mdm_ram_dumps;
	int mdm_errfatal_irq;
	int mdm_status_irq;
	int mdm_pblrdy_irq;
	struct delayed_work mdm2ap_status_check_work;
	struct work_struct mdm_status_work;
	struct work_struct sfr_reason_work;

	struct notifier_block mdm_panic_blk;

	int ssr_started_internally;	/* this driver initiated the SSR */
};

static struct list_head mdm_devices;
static DEFINE_SPINLOCK(mdm_devices_lock);

/* Number of modems currently undergoing subsystem restart; guarded by
 * ssr_lock. See mdm_start_ssr()/mdm_ssr_completed(). */
static int ssr_count;
static DEFINE_SPINLOCK(ssr_lock);

static unsigned int mdm_debug_mask;
int vddmin_gpios_sent;

static struct mdm_ops *mdm_ops;
/* Append @mdev to the global modem list under mdm_devices_lock. */
static void mdm_device_list_add(struct mdm_device *mdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mdm_devices_lock, flags);
	list_add_tail(&mdev->link, &mdm_devices);
	spin_unlock_irqrestore(&mdm_devices_lock, flags);
}
/* Unlink @mdev from the global modem list, if it is present. */
static void mdm_device_list_remove(struct mdm_device *mdev)
{
	unsigned long flags;
	struct mdm_device *lmdev, *tmp;

	spin_lock_irqsave(&mdm_devices_lock, flags);
	list_for_each_entry_safe(lmdev, tmp, &mdm_devices, link) {
		if (!mdev || lmdev != mdev)
			continue;
		pr_debug("%s: removing device id %d\n",
			 __func__, mdev->mdm_data.device_id);
		list_del(&mdev->link);
		break;
	}
	spin_unlock_irqrestore(&mdm_devices_lock, flags);
}
/* If the platform's cascading_ssr flag is set, the subsystem
 * restart module will restart the other modems so stop
 * monitoring them as well.
 * This function can be called from interrupt context.
 */
static void mdm_start_ssr(struct mdm_device *mdev)
{
	unsigned long flags;
	int start_ssr = 1;

	spin_lock_irqsave(&ssr_lock, flags);
	/* With cascading SSR, one in-flight restart covers all modems;
	 * skip starting another. */
	if (mdev->mdm_data.pdata->cascading_ssr &&
	    ssr_count > 0) {
		start_ssr = 0;
	} else {
		ssr_count++;
		/* Remember that this driver (not the SSR service) started
		 * the restart; balanced in mdm_ssr_completed(). */
		mdev->ssr_started_internally = 1;
	}
	spin_unlock_irqrestore(&ssr_lock, flags);

	if (start_ssr) {
		atomic_set(&mdev->mdm_data.mdm_ready, 0);
		pr_debug("%s: Resetting mdm id %d due to mdm error\n",
			 __func__, mdev->mdm_data.device_id);
		subsystem_restart_dev(mdev->mdm_subsys_dev);
	} else {
		pr_debug("%s: Another modem is already in SSR\n",
			 __func__);
	}
}
/* Increment the reference count to handle the case where
 * subsystem restart is initiated by the SSR service.
 */
static void mdm_ssr_started(struct mdm_device *mdev)
{
	unsigned long flags;

	spin_lock_irqsave(&ssr_lock, flags);
	ssr_count++;
	atomic_set(&mdev->mdm_data.mdm_ready, 0);
	spin_unlock_irqrestore(&ssr_lock, flags);
}
/* mdm_ssr_completed assumes that mdm_ssr_started has previously
 * been called.
 */
static void mdm_ssr_completed(struct mdm_device *mdev)
{
	unsigned long flags;

	spin_lock_irqsave(&ssr_lock, flags);
	/* One decrement balances mdm_ssr_started(); a second balances the
	 * increment done in mdm_start_ssr() when this driver itself
	 * triggered the restart. */
	ssr_count--;
	if (mdev->ssr_started_internally) {
		mdev->ssr_started_internally = 0;
		ssr_count--;
	}

	/* A negative count means start/complete calls are unbalanced. */
	if (ssr_count < 0) {
		pr_err("%s: ssr_count = %d\n",
		       __func__, ssr_count);
		panic("%s: ssr_count = %d < 0\n",
		      __func__, ssr_count);
	}
	spin_unlock_irqrestore(&ssr_lock, flags);
}
/* IRQ handler for the MDM2AP_VDDMIN gpio: log modem low-power
 * transitions. Purely informational. */
static irqreturn_t mdm_vddmin_change(int irq, void *dev_id)
{
	struct mdm_device *mdev = dev_id;
	struct mdm_vddmin_resource *vddmin_res;

	if (!mdev)
		return IRQ_HANDLED;
	vddmin_res = mdev->mdm_data.pdata->vddmin_resource;
	if (!vddmin_res)
		return IRQ_HANDLED;

	if (gpio_get_value(vddmin_res->mdm2ap_vddmin_gpio) == 0)
		pr_debug("External Modem id %d entered Vddmin\n",
			 mdev->mdm_data.device_id);
	else
		pr_debug("External Modem id %d exited Vddmin\n",
			 mdev->mdm_data.device_id);
	return IRQ_HANDLED;
}
/* The vddmin_res resource may not be supported by some platforms. */
static void mdm_setup_vddmin_gpios(void)
{
	unsigned long flags;
	struct msm_rpm_iv_pair req;
	struct mdm_device *mdev;
	struct mdm_vddmin_resource *vddmin_res;
	int irq, ret;

	spin_lock_irqsave(&mdm_devices_lock, flags);
	list_for_each_entry(mdev, &mdm_devices, link) {
		vddmin_res = mdev->mdm_data.pdata->vddmin_resource;
		if (!vddmin_res)
			continue;

		pr_debug("Enabling vddmin logging on modem id %d\n",
			 mdev->mdm_data.device_id);
		req.id = vddmin_res->rpm_id;
		/* Pack gpio number, modes, and drive strength into one RPM
		 * word: gpio in bits 31:16, modes 15:8, strength 7:0. */
		req.value =
			((uint32_t)vddmin_res->ap2mdm_vddmin_gpio & 0x0000FFFF)
			<< 16;
		req.value |= ((uint32_t)vddmin_res->modes & 0x000000FF) << 8;
		req.value |= (uint32_t)vddmin_res->drive_strength & 0x000000FF;

		msm_rpm_set(MSM_RPM_CTX_SET_0, &req, 1);

		/* Start monitoring low power gpio from mdm */
		irq = gpio_to_irq(vddmin_res->mdm2ap_vddmin_gpio);
		if (irq < 0)
			pr_err("%s: could not get LPM POWER IRQ resource mdm id %d.\n",
			       __func__, mdev->mdm_data.device_id);
		else {
			ret = request_threaded_irq(irq, NULL, mdm_vddmin_change,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				"mdm lpm", mdev);
			if (ret < 0)
				pr_err("%s: MDM LPM IRQ#%d request failed with error=%d",
				       __func__, irq, ret);
		}
	}
	spin_unlock_irqrestore(&mdm_devices_lock, flags);
	return;
}
/* Workqueue handler: after a restart, poll sysmon for the modem's
 * subsystem failure reason (SFR) string and log it. Retries because the
 * modem may take several seconds to come back up. */
static void mdm_restart_reason_fn(struct work_struct *work)
{
	int ret, ntries = 0;
	char sfr_buf[RD_BUF_SIZE];
	struct mdm_platform_data *pdata;
	struct mdm_device *mdev = container_of(work,
			struct mdm_device, sfr_reason_work);

	pdata = mdev->mdm_data.pdata;
	if (pdata->sysmon_subsys_id_valid) {
		do {
			ret = sysmon_get_reason(pdata->sysmon_subsys_id,
					sfr_buf, sizeof(sfr_buf));
			if (!ret) {
				pr_err("mdm restart reason: %s\n", sfr_buf);
				return;
			}
			/* Wait for the modem to be fully booted after a
			 * subsystem restart. This may take several seconds.
			 */
			msleep(SFR_RETRY_INTERVAL);
		} while (++ntries < SFR_MAX_RETRIES);
		pr_debug("%s: Error retrieving restart reason: %d\n",
			 __func__, ret);
	}
}
/* Delayed-work handler armed after boot: if the modem never raised
 * MDM2AP_STATUS within the timeout, trigger subsystem restart. */
static void mdm2ap_status_check(struct work_struct *work)
{
	struct mdm_device *mdev = container_of(work, struct mdm_device,
					       mdm2ap_status_check_work.work);
	struct mdm_modem_drv *mdm_drv = &mdev->mdm_data;

	if (mdm_drv->disable_status_check)
		return;

	if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0) {
		pr_debug("%s: MDM2AP_STATUS did not go high on mdm id %d\n",
			 __func__, mdev->mdm_data.device_id);
		mdm_start_ssr(mdev);
	}
}
/* Switch the MDM2AP_STATUS gpiomux setting between the "booting" and
 * "running" configurations, saving/restoring the previous setting so the
 * swap can be undone across a modem reboot. */
static void mdm_update_gpio_configs(struct mdm_device *mdev,
				    enum gpio_update_config gpio_config)
{
	struct mdm_modem_drv *mdm_drv = &mdev->mdm_data;

	/* Some gpio configuration may need updating after modem bootup.*/
	switch (gpio_config) {
	case GPIO_UPDATE_RUNNING_CONFIG:
		if (mdm_drv->pdata->mdm2ap_status_gpio_run_cfg) {
			/* Save the old config so BOOTING_CONFIG can restore it. */
			if (msm_gpiomux_write(mdm_drv->mdm2ap_status_gpio,
					      GPIOMUX_ACTIVE,
					      mdm_drv->pdata->mdm2ap_status_gpio_run_cfg,
					      &mdev->mdm2ap_status_old_config))
				pr_err("%s: failed updating running gpio config mdm id %d\n",
				       __func__, mdev->mdm_data.device_id);
			else
				mdev->mdm2ap_status_valid_old_config = 1;
		}
		break;
	case GPIO_UPDATE_BOOTING_CONFIG:
		if (mdev->mdm2ap_status_valid_old_config) {
			msm_gpiomux_write(mdm_drv->mdm2ap_status_gpio,
					  GPIOMUX_ACTIVE,
					  &mdev->mdm2ap_status_old_config,
					  NULL);
			mdev->mdm2ap_status_valid_old_config = 0;
		}
		break;
	default:
		pr_err("%s: called with no config\n", __func__);
		break;
	}
}
/* ioctl interface used by the userspace boot daemon to drive the modem's
 * power-on / boot / ramdump / shutdown state machine. @arg is a pointer
 * into userspace for commands that exchange a status word. */
static long mdm_modem_ioctl(struct file *filp, unsigned int cmd,
			    unsigned long arg)
{
	int status, ret = 0;
	struct mdm_device *mdev = filp->private_data;
	struct mdm_modem_drv *mdm_drv;

	if (_IOC_TYPE(cmd) != CHARM_CODE) {
		pr_err("%s: invalid ioctl code to mdm id %d\n",
		       __func__, mdev->mdm_data.device_id);
		return -EINVAL;
	}

	mdm_drv = &mdev->mdm_data;
	pr_debug("%s: Entering ioctl cmd = %d, mdm id = %d\n",
		 __func__, _IOC_NR(cmd), mdev->mdm_data.device_id);
	switch (cmd) {
	case WAKE_CHARM:
		/* Power the modem on via the platform callback. */
		pr_debug("%s: Powering on mdm id %d\n",
			 __func__, mdev->mdm_data.device_id);
		mdm_ops->power_on_mdm_cb(mdm_drv);
		break;
	case CHECK_FOR_BOOT:
		/* Report 1 while MDM2AP_STATUS is still low (not booted). */
		if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
			put_user(1, (unsigned long __user *) arg);
		else
			put_user(0, (unsigned long __user *) arg);
		break;
	case NORMAL_BOOT_DONE:
		/* Userspace reports boot result: nonzero status == failure. */
		pr_debug("%s: check if mdm id %d is booted up\n",
			 __func__, mdev->mdm_data.device_id);
		get_user(status, (unsigned long __user *) arg);
		if (status) {
			pr_debug("%s: normal boot of mdm id %d failed\n",
				 __func__, mdev->mdm_data.device_id);
			mdm_drv->mdm_boot_status = -EIO;
		} else {
			pr_debug("%s: normal boot of mdm id %d done\n",
				 __func__, mdev->mdm_data.device_id);
			mdm_drv->mdm_boot_status = 0;
		}
		atomic_set(&mdm_drv->mdm_ready, 1);

		if (mdm_ops->normal_boot_done_cb != NULL)
			mdm_ops->normal_boot_done_cb(mdm_drv);

		if (!mdev->first_boot)
			complete(&mdev->mdm_boot);
		else
			mdev->first_boot = 0;

		/* If successful, start a timer to check that the mdm2ap_status
		 * gpio goes high.
		 */
		if (!status && gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
			schedule_delayed_work(&mdev->mdm2ap_status_check_work,
				msecs_to_jiffies(MDM2AP_STATUS_TIMEOUT_MS));
		break;
	case RAM_DUMP_DONE:
		/* Userspace reports ramdump collection result. */
		pr_debug("%s: mdm done collecting RAM dumps\n", __func__);
		get_user(status, (unsigned long __user *) arg);
		if (status)
			mdm_drv->mdm_ram_dump_status = -EIO;
		else {
			pr_debug("%s: ramdump collection completed\n",
				 __func__);
			mdm_drv->mdm_ram_dump_status = 0;
#ifdef CONFIG_SEC_DEBUG
			if (sec_debug_is_enabled()) {
#ifdef CONFIG_SEC_DEBUG_MDM_FILE_INFO
				sec_set_mdm_subsys_info(mdm_read_err_report());
#endif
				panic("external_modem %s", mdm_read_err_report());
			}
#endif
		}
		complete(&mdev->mdm_ram_dumps);
		break;
	case WAIT_FOR_RESTART:
		/* Block until an SSR needs userspace to reload images;
		 * returns the boot type (normal vs ramdump) to the caller. */
		pr_debug("%s: wait for mdm to need images reloaded\n",
			 __func__);
		ret = wait_for_completion_interruptible(
				&mdev->mdm_needs_reload);
		if (!ret)
			put_user(mdm_drv->boot_type,
				 (unsigned long __user *) arg);
		init_completion(&mdev->mdm_needs_reload);
		break;
	case GET_DLOAD_STATUS:
		pr_debug("getting status of mdm2ap_errfatal_gpio\n");
		if (gpio_get_value(mdm_drv->mdm2ap_errfatal_gpio) == 1 &&
		    !atomic_read(&mdm_drv->mdm_ready))
			put_user(1, (unsigned long __user *) arg);
		else
			put_user(0, (unsigned long __user *) arg);
		break;
	case IMAGE_UPGRADE:
		pr_debug("%s Image upgrade ioctl recieved\n", __func__);
		if (mdm_drv->pdata->image_upgrade_supported &&
		    mdm_ops->image_upgrade_cb) {
			get_user(status, (unsigned long __user *) arg);
			mdm_ops->image_upgrade_cb(mdm_drv, status);
		} else
			pr_debug("%s Image upgrade not supported\n", __func__);
		break;
	case SHUTDOWN_CHARM:
		if (!mdm_drv->pdata->send_shdn)
			break;
		atomic_set(&mdm_drv->mdm_ready, 0);
		if (mdm_debug_mask & MDM_DEBUG_MASK_SHDN_LOG)
			pr_debug("Sending shutdown request to mdm\n");
		ret = sysmon_send_shutdown(SYSMON_SS_EXT_MODEM);
		if (ret)
			pr_err("%s: Graceful shutdown of the external modem failed, ret = %d\n",
			       __func__, ret);
		put_user(ret, (unsigned long __user *) arg);
		break;
	default:
		pr_err("%s: invalid ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
		ret = -EINVAL;
		break;
	}
	return ret;
}
/* Workqueue handler queued when MDM2AP_STATUS goes high: forward the new
 * value to the platform status callback and switch gpios to the
 * "running" configuration. */
static void mdm_status_fn(struct work_struct *work)
{
	struct mdm_device *mdev =
		container_of(work, struct mdm_device, mdm_status_work);
	struct mdm_modem_drv *mdm_drv = &mdev->mdm_data;
	int value = gpio_get_value(mdm_drv->mdm2ap_status_gpio);

	pr_debug("%s: status:%d\n", __func__, value);
	if (atomic_read(&mdm_drv->mdm_ready) && mdm_ops->status_cb)
		mdm_ops->status_cb(mdm_drv, value);

	/* Update gpio configuration to "running" config. */
	mdm_update_gpio_configs(mdev, GPIO_UPDATE_RUNNING_CONFIG);
}
/* Mask all modem-originated interrupts; used on panic and teardown. */
static void mdm_disable_irqs(struct mdm_device *mdev)
{
	if (mdev) {
		disable_irq_nosync(mdev->mdm_errfatal_irq);
		disable_irq_nosync(mdev->mdm_status_irq);
		disable_irq_nosync(mdev->mdm_pblrdy_irq);
	}
}
/* IRQ handler for MDM2AP_ERRFATAL: the modem reported a fatal error, so
 * start subsystem restart — but only if the modem was considered up
 * (ready flag set and status gpio still high). */
static irqreturn_t mdm_errfatal(int irq, void *dev_id)
{
	struct mdm_modem_drv *mdm_drv;
	struct mdm_device *mdev = (struct mdm_device *)dev_id;

	if (!mdev)
		return IRQ_HANDLED;

	pr_debug("%s: mdm id %d sent errfatal interrupt\n",
		 __func__, mdev->mdm_data.device_id);
	mdm_drv = &mdev->mdm_data;
	if (atomic_read(&mdm_drv->mdm_ready) &&
	    (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 1)) {
		pr_debug("%s: Received err fatal from mdm id %d\n",
			 __func__, mdev->mdm_data.device_id);
		mdm_start_ssr(mdev);
	}
	return IRQ_HANDLED;
}
/* set the mdm_device as the file's private data */
static int mdm_modem_open(struct inode *inode, struct file *file)
{
	struct miscdevice *misc = file->private_data;

	file->private_data =
		container_of(misc, struct mdm_device, misc_device);
	return 0;
}
/* Kernel panic notifier: assert AP2MDM_ERRFATAL so the modem halts, then
 * poll up to MDM_MODEM_TIMEOUT ms for it to acknowledge by dropping
 * MDM2AP_STATUS; if it never does, force a reset so the modem enters
 * download mode. Runs in panic (atomic) context. */
static int mdm_panic_prep(struct notifier_block *this,
			  unsigned long event, void *ptr)
{
	int i;
	struct mdm_modem_drv *mdm_drv;
	struct mdm_device *mdev =
		container_of(this, struct mdm_device, mdm_panic_blk);

	mdm_drv = &mdev->mdm_data;
	pr_debug("%s: setting AP2MDM_ERRFATAL high for a non graceful reset\n",
		 __func__);
	mdm_disable_irqs(mdev);
	gpio_set_value(mdm_drv->ap2mdm_errfatal_gpio, 1);

	for (i = MDM_MODEM_TIMEOUT; i > 0; i -= MDM_MODEM_DELTA) {
		/* Keep the watchdog fed while busy-waiting at panic time. */
		pet_watchdog();
		mdelay(MDM_MODEM_DELTA);
		if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
			break;
	}
	if (i <= 0) {
		pr_err("%s: MDM2AP_STATUS never went low\n", __func__);
		/* Reset the modem so that it will go into download mode. */
		if (mdm_drv && mdm_ops->atomic_reset_mdm_cb)
			mdm_ops->atomic_reset_mdm_cb(mdm_drv);
	}
	return NOTIFY_DONE;
}
/* IRQ handler for MDM2AP_STATUS edges. A falling edge while the modem is
 * marked ready means an unexpected reset (start SSR); a rising edge means
 * the modem finished booting (cancel the boot-timeout check and queue the
 * status work). */
static irqreturn_t mdm_status_change(int irq, void *dev_id)
{
	struct mdm_modem_drv *mdm_drv;
	struct mdm_device *mdev = (struct mdm_device *)dev_id;
	int value;

	if (!mdev)
		return IRQ_HANDLED;

	mdm_drv = &mdev->mdm_data;
	value = gpio_get_value(mdm_drv->mdm2ap_status_gpio);
	if ((mdm_debug_mask & MDM_DEBUG_MASK_SHDN_LOG) && (value == 0))
		pr_debug("%s: mdm2ap_status went low\n", __func__);

	pr_debug("%s: mdm id %d sent status change interrupt\n",
		 __func__, mdev->mdm_data.device_id);
	if (value == 0 && atomic_read(&mdm_drv->mdm_ready)) {
		pr_debug("%s: unexpected reset external modem id %d\n",
			 __func__, mdev->mdm_data.device_id);
		mdm_drv->mdm_unexpected_reset_occurred = 1;
		mdm_start_ssr(mdev);
	} else if (value == 1) {
		cancel_delayed_work(&mdev->mdm2ap_status_check_work);
		pr_debug("%s: status = 1: mdm id %d is now ready\n",
			 __func__, mdev->mdm_data.device_id);
		queue_work(mdev->mdm_queue, &mdev->mdm_status_work);
	}
	return IRQ_HANDLED;
}
/* IRQ handler for MDM2AP_PBLRDY: log the primary-bootloader-ready state.
 * Informational only. */
static irqreturn_t mdm_pblrdy_change(int irq, void *dev_id)
{
	struct mdm_device *mdev = dev_id;

	if (mdev) {
		struct mdm_modem_drv *mdm_drv = &mdev->mdm_data;

		pr_debug("%s: mdm id %d: pbl ready:%d\n",
			 __func__, mdev->mdm_data.device_id,
			 gpio_get_value(mdm_drv->mdm2ap_pblrdy));
	}
	return IRQ_HANDLED;
}
/* subsys_desc.shutdown hook: take the modem down at the start of a
 * subsystem restart. Asserts AP2MDM_ERRFATAL, optionally waits for the
 * modem to prepare for ramdumps, then resets it unless the reset already
 * happened unexpectedly. */
static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys)
{
	struct mdm_device *mdev =
		container_of(crashed_subsys, struct mdm_device, mdm_subsys);
	struct mdm_modem_drv *mdm_drv = &mdev->mdm_data;

	pr_debug("%s: ssr on modem id %d\n", __func__,
		 mdev->mdm_data.device_id);

	mdm_ssr_started(mdev);
	cancel_delayed_work(&mdev->mdm2ap_status_check_work);
	gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 1);
	if (mdm_drv->pdata->ramdump_delay_ms > 0) {
		/* Wait for the external modem to complete
		 * its preparation for ramdumps.
		 */
		msleep(mdm_drv->pdata->ramdump_delay_ms);
	}
	if (!mdm_drv->mdm_unexpected_reset_occurred) {
		mdm_ops->reset_mdm_cb(mdm_drv);
		/* Update gpio configuration to "booting" config. */
		mdm_update_gpio_configs(mdev, GPIO_UPDATE_BOOTING_CONFIG);
	} else {
		mdm_drv->mdm_unexpected_reset_occurred = 0;
	}
	return 0;
}
/* subsys_desc.powerup hook: bring the modem back after shutdown. Powers
 * it on, tells the userspace daemon to reload images (mdm_needs_reload),
 * and waits up to MDM_BOOT_TIMEOUT for boot completion, then queues the
 * restart-reason query. Returns the resulting boot status. */
static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys)
{
	struct mdm_device *mdev =
		container_of(crashed_subsys, struct mdm_device,
			     mdm_subsys);
	struct mdm_modem_drv *mdm_drv = &mdev->mdm_data;

	pr_debug("%s: ssr on modem id %d\n",
		 __func__, mdev->mdm_data.device_id);
	gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 0);
	gpio_direction_output(mdm_drv->ap2mdm_status_gpio, 1);

	if (mdm_drv->pdata->ps_hold_delay_ms > 0)
		msleep(mdm_drv->pdata->ps_hold_delay_ms);

	mdm_ops->power_on_mdm_cb(mdm_drv);
	mdm_drv->boot_type = CHARM_NORMAL_BOOT;
	mdm_ssr_completed(mdev);
	/* Unblock the daemon's WAIT_FOR_RESTART ioctl so it reloads images. */
	complete(&mdev->mdm_needs_reload);
	if (!wait_for_completion_timeout(&mdev->mdm_boot,
					 msecs_to_jiffies(MDM_BOOT_TIMEOUT))) {
		mdm_drv->mdm_boot_status = -ETIMEDOUT;
		pr_debug("%s: mdm modem restart timed out.\n", __func__);
	} else {
		pr_debug("%s: id %d: mdm modem has been restarted\n",
			 __func__, mdm_drv->device_id);

		/* Log the reason for the restart */
		if (mdm_drv->pdata->sfr_query)
			queue_work(mdev->mdm_sfr_queue, &mdev->sfr_reason_work);
	}
	init_completion(&mdev->mdm_boot);
	return mdm_drv->mdm_boot_status;
}
/* subsys_desc.ramdump hook: when dumps are requested, switch the daemon
 * into CHARM_RAM_DUMPS boot mode and wait (bounded by dump_timeout_ms)
 * for it to finish collection, then power the modem down unless the
 * platform forbids it. Returns the dump status. */
static int mdm_subsys_ramdumps(int want_dumps,
			       const struct subsys_desc *crashed_subsys)
{
	struct mdm_device *mdev =
		container_of(crashed_subsys, struct mdm_device,
			     mdm_subsys);
	struct mdm_modem_drv *mdm_drv = &mdev->mdm_data;

	pr_debug("%s: ssr on modem id %d\n", __func__,
		 mdev->mdm_data.device_id);
	mdm_drv->mdm_ram_dump_status = 0;
	cancel_delayed_work(&mdev->mdm2ap_status_check_work);
	if (want_dumps) {
		mdm_drv->boot_type = CHARM_RAM_DUMPS;
		complete(&mdev->mdm_needs_reload);
		if (!wait_for_completion_timeout(&mdev->mdm_ram_dumps,
				msecs_to_jiffies(mdev->dump_timeout_ms))) {
			mdm_drv->mdm_ram_dump_status = -ETIMEDOUT;
			/* Balance mdm_ssr_started() since powerup will not
			 * run its normal completion path here. */
			mdm_ssr_completed(mdev);
			pr_err("%s: mdm modem ramdumps timed out.\n",
			       __func__);
		} else
			pr_debug("%s: mdm modem ramdumps completed.\n",
				 __func__);
		init_completion(&mdev->mdm_ram_dumps);
		if (!mdm_drv->pdata->no_powerdown_after_ramdumps) {
			mdm_ops->power_down_mdm_cb(mdm_drv);
			/* Update gpio configuration to "booting" config. */
			mdm_update_gpio_configs(mdev,
						GPIO_UPDATE_BOOTING_CONFIG);
		}
	}
	return mdm_drv->mdm_ram_dump_status;
}
/* Once the gpios are sent to RPM and debugging
 * starts, there is no way to stop it without
 * rebooting the device.
 */
static int mdm_debug_mask_set(void *data, u64 val)
{
	/* Vddmin gpio setup is one-shot: send it to RPM only the first
	 * time the VDDMIN_SETUP bit is written. */
	if (!vddmin_gpios_sent &&
	    (val & MDM_DEBUG_MASK_VDDMIN_SETUP)) {
		mdm_setup_vddmin_gpios();
		vddmin_gpios_sent = 1;
	}

	mdm_debug_mask = val;
	if (mdm_ops->debug_state_changed_cb)
		mdm_ops->debug_state_changed_cb(mdm_debug_mask);
	return 0;
}
/* debugfs read accessor for the current debug mask. */
static int mdm_debug_mask_get(void *data, u64 *val)
{
	*val = mdm_debug_mask;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(mdm_debug_mask_fops,
mdm_debug_mask_get,
mdm_debug_mask_set, "%llu\n");
static int mdm_debugfs_init(void)
{
struct dentry *dent;
dent = debugfs_create_dir("mdm_dbg", 0);
if (IS_ERR(dent))
return PTR_ERR(dent);
debugfs_create_file("debug_mask", 0644, dent, NULL,
&mdm_debug_mask_fops);
return 0;
}
/* Character-device ops for the per-modem misc device; the boot daemon
 * drives the modem through mdm_modem_ioctl(). */
static const struct file_operations mdm_modem_fops = {
	.owner		= THIS_MODULE,
	.open		= mdm_modem_open,
	.unlocked_ioctl	= mdm_modem_ioctl,
};
static void mdm_modem_initialize_data(struct platform_device *pdev,
struct mdm_device *mdev)
{
struct mdm_modem_drv *mdm_drv = &mdev->mdm_data;
struct resource *pres;
mdm_drv->pdata = pdev->dev.platform_data;
if (pdev->id < 0)
mdm_drv->device_id = 0;
else
mdm_drv->device_id = pdev->id;
memset((void *)&mdev->mdm_subsys, 0,
sizeof(struct subsys_desc));
if (mdev->mdm_data.device_id <= 0)
snprintf(mdev->subsys_name, sizeof(mdev->subsys_name),
"%s", EXTERNAL_MODEM);
else
snprintf(mdev->subsys_name, sizeof(mdev->subsys_name),
"%s.%d", EXTERNAL_MODEM, mdev->mdm_data.device_id);
mdev->mdm_subsys.shutdown = mdm_subsys_shutdown;
mdev->mdm_subsys.ramdump = mdm_subsys_ramdumps;
mdev->mdm_subsys.powerup = mdm_subsys_powerup;
mdev->mdm_subsys.name = mdev->subsys_name;
memset((void *)&mdev->misc_device, 0,
sizeof(struct miscdevice));
if (mdev->mdm_data.device_id <= 0)
snprintf(mdev->device_name, sizeof(mdev->device_name),
"%s", DEVICE_BASE_NAME);
else
snprintf(mdev->device_name, sizeof(mdev->device_name),
"%s%d", DEVICE_BASE_NAME, mdev->mdm_data.device_id);
mdev->misc_device.minor = MISC_DYNAMIC_MINOR;
mdev->misc_device.name = mdev->device_name;
mdev->misc_device.fops = &mdm_modem_fops;
memset((void *)&mdev->mdm_panic_blk, 0,
sizeof(struct notifier_block));
mdev->mdm_panic_blk.notifier_call = mdm_panic_prep;
atomic_notifier_chain_register(&panic_notifier_list,
&mdev->mdm_panic_blk);
/* MDM2AP_ERRFATAL */
pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
"MDM2AP_ERRFATAL");
mdm_drv->mdm2ap_errfatal_gpio = pres ? pres->start : -1;
/* AP2MDM_ERRFATAL */
pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
"AP2MDM_ERRFATAL");
mdm_drv->ap2mdm_errfatal_gpio = pres ? pres->start : -1;
/* MDM2AP_STATUS */
pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
"MDM2AP_STATUS");
mdm_drv->mdm2ap_status_gpio = pres ? pres->start : -1;
/* AP2MDM_STATUS */
pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
"AP2MDM_STATUS");
mdm_drv->ap2mdm_status_gpio = pres ? pres->start : -1;
/* MDM2AP_WAKEUP */
pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
"MDM2AP_WAKEUP");
mdm_drv->mdm2ap_wakeup_gpio = pres ? pres->start : -1;
/* AP2MDM_WAKEUP */
pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
"AP2MDM_WAKEUP");
mdm_drv->ap2mdm_wakeup_gpio = pres ? pres->start : -1;
/* AP2MDM_SOFT_RESET */
pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
"AP2MDM_SOFT_RESET");
mdm_drv->ap2mdm_soft_reset_gpio = pres ? pres->start : -1;
/* AP2MDM_KPDPWR_N */
pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
"AP2MDM_KPDPWR_N");
mdm_drv->ap2mdm_kpdpwr_n_gpio = pres ? pres->start : -1;
/* AP2MDM_PMIC_PWR_EN */
pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
"AP2MDM_PMIC_PWR_EN");
mdm_drv->ap2mdm_pmic_pwr_en_gpio = pres ? pres->start : -1;
/* MDM2AP_PBLRDY */
pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
"MDM2AP_PBLRDY");
mdm_drv->mdm2ap_pblrdy = pres ? pres->start : -1;
/*USB_SW*/
pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
"USB_SW");
mdm_drv->usb_switch_gpio = pres ? pres->start : -1;
mdm_drv->boot_type = CHARM_NORMAL_BOOT;
mdm_drv->dump_timeout_ms = mdm_drv->pdata->ramdump_timeout_ms > 0 ?
mdm_drv->pdata->ramdump_timeout_ms : MDM_RDUMP_TIMEOUT;
init_completion(&mdev->mdm_needs_reload);
init_completion(&mdev->mdm_boot);
init_completion(&mdev->mdm_ram_dumps);
mdev->first_boot = 1;
mutex_init(&mdm_drv->peripheral_status_lock);
}
/*
 * Undo mdm_configure_ipc(): free every gpio it requested and destroy
 * the workqueues.  Safe to call on a partially configured device (the
 * workqueue pointers are NULL-checked and cleared).
 *
 * Fix: the original never freed MDM2AP_PBLRDY or USB_SW even though
 * mdm_configure_ipc() requests both when valid — a gpio leak across
 * remove/probe cycles.
 */
static void mdm_deconfigure_ipc(struct mdm_device *mdev)
{
	struct mdm_modem_drv *mdm_drv = &mdev->mdm_data;
	gpio_free(mdm_drv->ap2mdm_status_gpio);
	gpio_free(mdm_drv->ap2mdm_errfatal_gpio);
	if (GPIO_IS_VALID(mdm_drv->ap2mdm_kpdpwr_n_gpio))
		gpio_free(mdm_drv->ap2mdm_kpdpwr_n_gpio);
	if (GPIO_IS_VALID(mdm_drv->ap2mdm_pmic_pwr_en_gpio))
		gpio_free(mdm_drv->ap2mdm_pmic_pwr_en_gpio);
	gpio_free(mdm_drv->mdm2ap_status_gpio);
	gpio_free(mdm_drv->mdm2ap_errfatal_gpio);
	if (GPIO_IS_VALID(mdm_drv->mdm2ap_pblrdy))
		gpio_free(mdm_drv->mdm2ap_pblrdy);
	if (GPIO_IS_VALID(mdm_drv->ap2mdm_soft_reset_gpio))
		gpio_free(mdm_drv->ap2mdm_soft_reset_gpio);
	if (GPIO_IS_VALID(mdm_drv->ap2mdm_wakeup_gpio))
		gpio_free(mdm_drv->ap2mdm_wakeup_gpio);
	if (GPIO_IS_VALID(mdm_drv->usb_switch_gpio))
		gpio_free(mdm_drv->usb_switch_gpio);
	if (mdev->mdm_queue) {
		destroy_workqueue(mdev->mdm_queue);
		mdev->mdm_queue = NULL;
	}
	if (mdev->mdm_sfr_queue) {
		destroy_workqueue(mdev->mdm_sfr_queue);
		mdev->mdm_sfr_queue = NULL;
	}
}
/*
 * Request and configure all AP<->MDM gpios, create the workqueues,
 * register with the subsystem-restart framework and hook up the
 * errfatal/status/pblrdy interrupts.
 *
 * The irq setup is deliberately best-effort: a failure in one irq
 * request jumps forward to the *next* stage's label (errfatal_err,
 * status_err, pblrdy_err) instead of the error exit, leaving that irq
 * unarmed while the function still returns 0.  Only workqueue
 * allocation or subsys_register() failure is fatal and unwinds via
 * mdm_deconfigure_ipc().
 */
static int mdm_configure_ipc(struct mdm_device *mdev)
{
	struct mdm_modem_drv *mdm_drv = &mdev->mdm_data;
	int ret = -1, irq;
	/* Mandatory gpios are requested unconditionally; optional ones
	 * only when the platform resource supplied a valid number.
	 * NOTE(review): most gpio_request() results are ignored here —
	 * only USB_SW's is checked. */
	gpio_request(mdm_drv->ap2mdm_status_gpio, "AP2MDM_STATUS");
	gpio_request(mdm_drv->ap2mdm_errfatal_gpio, "AP2MDM_ERRFATAL");
	if (GPIO_IS_VALID(mdm_drv->ap2mdm_kpdpwr_n_gpio))
		gpio_request(mdm_drv->ap2mdm_kpdpwr_n_gpio, "AP2MDM_KPDPWR_N");
	gpio_request(mdm_drv->mdm2ap_status_gpio, "MDM2AP_STATUS");
	gpio_request(mdm_drv->mdm2ap_errfatal_gpio, "MDM2AP_ERRFATAL");
	if (GPIO_IS_VALID(mdm_drv->mdm2ap_pblrdy))
		gpio_request(mdm_drv->mdm2ap_pblrdy, "MDM2AP_PBLRDY");
	if (GPIO_IS_VALID(mdm_drv->ap2mdm_pmic_pwr_en_gpio))
		gpio_request(mdm_drv->ap2mdm_pmic_pwr_en_gpio,
					 "AP2MDM_PMIC_PWR_EN");
	if (GPIO_IS_VALID(mdm_drv->ap2mdm_soft_reset_gpio))
		gpio_request(mdm_drv->ap2mdm_soft_reset_gpio,
					 "AP2MDM_SOFT_RESET");
	if (GPIO_IS_VALID(mdm_drv->ap2mdm_wakeup_gpio))
		gpio_request(mdm_drv->ap2mdm_wakeup_gpio, "AP2MDM_WAKEUP");
	if (GPIO_IS_VALID(mdm_drv->usb_switch_gpio)) {
		if (gpio_request(mdm_drv->usb_switch_gpio, "USB_SW")) {
			pr_err("%s Failed to get usb switch gpio\n", __func__);
			/* invalidate so later users skip the gpio */
			mdm_drv->usb_switch_gpio = -1;
		}
	}
	/* AP-driven lines idle low; MDM-driven lines are inputs. */
	gpio_direction_output(mdm_drv->ap2mdm_status_gpio, 0);
	gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 0);
	if (GPIO_IS_VALID(mdm_drv->ap2mdm_wakeup_gpio))
		gpio_direction_output(mdm_drv->ap2mdm_wakeup_gpio, 0);
	gpio_direction_input(mdm_drv->mdm2ap_status_gpio);
	gpio_direction_input(mdm_drv->mdm2ap_errfatal_gpio);
	mdev->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
	if (!mdev->mdm_queue) {
		pr_err("%s: could not create mdm_queue for mdm id %d\n",
			   __func__, mdev->mdm_data.device_id);
		ret = -ENOMEM;
		goto fatal_err;
	}
	mdev->mdm_sfr_queue = alloc_workqueue("mdm_sfr_queue", 0, 0);
	if (!mdev->mdm_sfr_queue) {
		pr_err("%s: could not create mdm_sfr_queue for mdm id %d\n",
			   __func__, mdev->mdm_data.device_id);
		ret = -ENOMEM;
		goto fatal_err;
	}
	/* Register subsystem handlers */
	mdev->mdm_subsys_dev = subsys_register(&mdev->mdm_subsys);
	if (IS_ERR(mdev->mdm_subsys_dev)) {
		ret = PTR_ERR(mdev->mdm_subsys_dev);
		goto fatal_err;
	}
	subsys_default_online(mdev->mdm_subsys_dev);
	/* ERR_FATAL irq. */
	irq = gpio_to_irq(mdm_drv->mdm2ap_errfatal_gpio);
	if (irq < 0) {
		pr_err("%s: bad MDM2AP_ERRFATAL IRQ resource, err = %d\n",
			   __func__, irq);
		goto errfatal_err;
	}
	ret = request_irq(irq, mdm_errfatal,
		IRQF_TRIGGER_RISING , "mdm errfatal", mdev);
	if (ret < 0) {
		pr_err("%s: MDM2AP_ERRFATAL IRQ#%d request failed, err=%d\n",
			   __func__, irq, ret);
		goto errfatal_err;
	}
	mdev->mdm_errfatal_irq = irq;
errfatal_err:
	/* deliberate fall-through: continue setup even if errfatal irq
	 * could not be armed */
	/* status irq */
	irq = gpio_to_irq(mdm_drv->mdm2ap_status_gpio);
	if (irq < 0) {
		pr_err("%s: bad MDM2AP_STATUS IRQ resource, err = %d\n",
			   __func__, irq);
		goto status_err;
	}
	ret = request_threaded_irq(irq, NULL, mdm_status_change,
		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_SHARED,
		"mdm status", mdev);
	if (ret < 0) {
		pr_err("%s: MDM2AP_STATUS IRQ#%d request failed, err=%d",
			   __func__, irq, ret);
		goto status_err;
	}
	mdev->mdm_status_irq = irq;
status_err:
	/* deliberate fall-through, as above */
	if (GPIO_IS_VALID(mdm_drv->mdm2ap_pblrdy)) {
		irq = gpio_to_irq(mdm_drv->mdm2ap_pblrdy);
		if (irq < 0) {
			pr_err("%s: could not get MDM2AP_PBLRDY IRQ resource\n",
				   __func__);
			goto pblrdy_err;
		}
		ret = request_threaded_irq(irq, NULL, mdm_pblrdy_change,
			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
			IRQF_SHARED,
			"mdm pbl ready", mdev);
		if (ret < 0) {
			pr_err("%s: MDM2AP_PBL IRQ#%d request failed error=%d\n",
				   __func__, irq, ret);
			goto pblrdy_err;
		}
		mdev->mdm_pblrdy_irq = irq;
	}
pblrdy_err:
	/*
	 * If AP2MDM_PMIC_PWR_EN gpio is used, pull it high. It remains
	 * high until the whole phone is shut down.
	 */
	if (GPIO_IS_VALID(mdm_drv->ap2mdm_pmic_pwr_en_gpio))
		gpio_direction_output(mdm_drv->ap2mdm_pmic_pwr_en_gpio, 1);
	return 0;
fatal_err:
	mdm_deconfigure_ipc(mdev);
	return ret;
}
/*
 * Probe: allocate per-modem state, initialise platform data, wire up
 * gpios/irqs/workqueues via mdm_configure_ipc(), register the misc
 * device and optionally power the modem on early (tabla detection).
 *
 * Returns 0 on success or a negative errno.  Fixes: the original
 * discarded mdm_configure_ipc()'s error code and returned a stale -1,
 * and logged the *success* message at KERN_ERR.
 */
static int __devinit mdm_modem_probe(struct platform_device *pdev)
{
	struct mdm_device *mdev = NULL;
	int ret;
	mdev = kzalloc(sizeof(struct mdm_device), GFP_KERNEL);
	if (!mdev) {
		pr_err("%s: kzalloc fail.\n", __func__);
		ret = -ENOMEM;
		goto init_err;
	}
	platform_set_drvdata(pdev, mdev);
	mdm_modem_initialize_data(pdev, mdev);
	if (mdm_ops->debug_state_changed_cb)
		mdm_ops->debug_state_changed_cb(mdm_debug_mask);
	/* Propagate the real error code instead of discarding it. */
	ret = mdm_configure_ipc(mdev);
	if (ret) {
		pr_err("%s: mdm_configure_ipc failed, id = %d\n",
			   __func__, mdev->mdm_data.device_id);
		goto init_err;
	}
	pr_debug("%s: Registering mdm id %d\n", __func__,
			mdev->mdm_data.device_id);
	ret = misc_register(&mdev->misc_device);
	if (ret) {
		pr_err("%s: failed registering mdm id %d, ret = %d\n",
			   __func__, mdev->mdm_data.device_id, ret);
		mdm_deconfigure_ipc(mdev);
		goto init_err;
	}
	pr_info("%s: registered mdm id %d\n",
			__func__, mdev->mdm_data.device_id);
	mdm_device_list_add(mdev);
	INIT_DELAYED_WORK(&mdev->mdm2ap_status_check_work,
			  mdm2ap_status_check);
	INIT_WORK(&mdev->mdm_status_work, mdm_status_fn);
	INIT_WORK(&mdev->sfr_reason_work, mdm_restart_reason_fn);
	/* Perform early powerup of the external modem in order to
	 * allow tabla devices to be found.
	 */
	if (mdev->mdm_data.pdata->early_power_on)
		mdm_ops->power_on_mdm_cb(&mdev->mdm_data);
	return 0;
init_err:
	kfree(mdev);
	return ret;
}
/*
 * Remove: release gpios/irqs/workqueues, drop the /dev node, unlink
 * the device from the global list and free the per-modem state.
 * Returns the misc_deregister() result.
 */
static int __devexit mdm_modem_remove(struct platform_device *pdev)
{
	int ret;
	struct mdm_device *mdev = platform_get_drvdata(pdev);
	pr_debug("%s: removing device id %d\n",
			__func__, mdev->mdm_data.device_id);
	mdm_deconfigure_ipc(mdev);
	ret = misc_deregister(&mdev->misc_device);
	mdm_device_list_remove(mdev);
	kfree(mdev);
	return ret;
}
/*
 * Shutdown: mask the modem irqs, power the modem down through the ops
 * vector, and drop the PMIC power-enable line (held high since
 * mdm_configure_ipc()) now that the whole phone is going down.
 */
static void mdm_modem_shutdown(struct platform_device *pdev)
{
	struct mdm_modem_drv *mdm_drv;
	struct mdm_device *mdev = platform_get_drvdata(pdev);
	pr_debug("%s: shutting down device id %d\n",
			__func__, mdev->mdm_data.device_id);
	mdm_disable_irqs(mdev);
	mdm_drv = &mdev->mdm_data;
	mdm_ops->power_down_mdm_cb(mdm_drv);
	if (GPIO_IS_VALID(mdm_drv->ap2mdm_pmic_pwr_en_gpio))
		gpio_direction_output(mdm_drv->ap2mdm_pmic_pwr_en_gpio, 0);
}
/* Device-tree compatible strings handled by this driver. */
static struct of_device_id mdm_match_table[] = {
	{.compatible = "qcom,mdm2_modem,mdm2_modem.1"},
	{},
};
/* Platform driver glue: probe/remove/shutdown plus DT matching. */
static struct platform_driver mdm_modem_driver = {
	.probe    = mdm_modem_probe,
	.remove   = __devexit_p(mdm_modem_remove),
	.shutdown = mdm_modem_shutdown,
	.driver = {
		.name = "mdm2_modem",
		.owner = THIS_MODULE,
		.of_match_table = mdm_match_table,
	},
};
/*
 * Module init: resolve the platform-specific ops vector, then register
 * the platform driver.  mdm_debugfs_init()'s result is intentionally
 * ignored — debugfs is optional and must not block driver load.
 */
static int __init mdm_modem_init(void)
{
	int ret;
	ret = mdm_get_ops(&mdm_ops);
	if (ret)
		return ret;
	INIT_LIST_HEAD(&mdm_devices);
	mdm_debugfs_init();
	return platform_driver_register(&mdm_modem_driver);
}
/* Module exit: unregister the driver; per-device teardown happens in
 * mdm_modem_remove() as each device is unbound. */
static void __exit mdm_modem_exit(void)
{
	platform_driver_unregister(&mdm_modem_driver);
}
module_init(mdm_modem_init);
module_exit(mdm_modem_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("mdm modem driver");
MODULE_VERSION("2.0");
MODULE_ALIAS("mdm_modem");
| gpl-2.0 |
alexey6600/kernel_sony_tetra_2 | fs/adfs/dir.c | 2141 | 5761 | /*
* linux/fs/adfs/dir.c
*
* Copyright (C) 1999-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Common directory handling for ADFS
*/
#include "adfs.h"
/*
* For future. This should probably be per-directory.
*/
static DEFINE_RWLOCK(adfs_dir_lock);
/*
 * readdir for ADFS: emit "." and ".." synthetically, then walk the
 * on-disc directory under adfs_dir_lock.  f_pos semantics: 0 = ".",
 * 1 = "..", >=2 = on-disc entry (f_pos - 2).
 */
static int
adfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir;
	struct object_info obj;
	struct adfs_dir dir;
	int ret = 0;
	/* positions beyond 32 bits cannot be valid here */
	if (filp->f_pos >> 32)
		goto out;
	ret = ops->read(sb, inode->i_ino, inode->i_size, &dir);
	if (ret)
		goto out;
	switch ((unsigned long)filp->f_pos) {
	case 0:
		if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
			goto free_out;
		filp->f_pos += 1;
		/* fall through: continue with ".." */
	case 1:
		if (filldir(dirent, "..", 2, 1, dir.parent_id, DT_DIR) < 0)
			goto free_out;
		filp->f_pos += 1;
		/* fall through */
	default:
		break;
	}
	read_lock(&adfs_dir_lock);
	ret = ops->setpos(&dir, filp->f_pos - 2);
	if (ret)
		goto unlock_out;
	while (ops->getnext(&dir, &obj) == 0) {
		if (filldir(dirent, obj.name, obj.name_len,
			    filp->f_pos, obj.file_id, DT_UNKNOWN) < 0)
			goto unlock_out;
		filp->f_pos += 1;
	}
unlock_out:
	read_unlock(&adfs_dir_lock);
free_out:
	ops->free(&dir);
out:
	return ret;
}
/*
 * Write back the directory entry for @obj in its parent directory.
 * Only functional with CONFIG_ADFS_FS_RW; otherwise returns -EINVAL.
 * When @wait is set the directory is also synced, and a sync error is
 * reported in place of a successful update result.
 */
int
adfs_dir_update(struct super_block *sb, struct object_info *obj, int wait)
{
	int ret = -EINVAL;
#ifdef CONFIG_ADFS_FS_RW
	struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir;
	struct adfs_dir dir;
	printk(KERN_INFO "adfs_dir_update: object %06X in dir %06X\n",
		 obj->file_id, obj->parent_id);
	/* format variant without write support */
	if (!ops->update) {
		ret = -EINVAL;
		goto out;
	}
	ret = ops->read(sb, obj->parent_id, 0, &dir);
	if (ret)
		goto out;
	write_lock(&adfs_dir_lock);
	ret = ops->update(&dir, obj);
	write_unlock(&adfs_dir_lock);
	if (wait) {
		int err = ops->sync(&dir);
		if (!ret)
			ret = err;
	}
	ops->free(&dir);
out:
#endif
	return ret;
}
/*
 * Case-insensitive comparison of a lookup name against a directory
 * entry (only ASCII A-Z folds, RISC OS style).  Returns 1 on a match,
 * 0 otherwise.
 */
static int
adfs_match(struct qstr *name, struct object_info *obj)
{
	unsigned int pos;

	if (name->len != obj->name_len)
		return 0;

	for (pos = 0; pos < name->len; pos++) {
		char a = name->name[pos];
		char b = obj->name[pos];

		/* fold both characters to lower case before comparing */
		if (a >= 'A' && a <= 'Z')
			a += 'a' - 'A';
		if (b >= 'A' && b <= 'Z')
			b += 'a' - 'A';
		if (a != b)
			return 0;
	}
	return 1;
}
/*
 * Look up @name in the directory @inode, filling in @obj on success.
 * Returns 0 on success, -ENOENT when the name is absent, -EIO if the
 * cached parent id disagrees with the on-disc directory, or the error
 * from reading the directory.
 */
static int
adfs_dir_lookup_byname(struct inode *inode, struct qstr *name, struct object_info *obj)
{
	struct super_block *sb = inode->i_sb;
	struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir;
	struct adfs_dir dir;
	int ret;
	ret = ops->read(sb, inode->i_ino, inode->i_size, &dir);
	if (ret)
		goto out;
	/* sanity check: the directory on disc must still agree with
	 * the parent recorded in the in-core inode */
	if (ADFS_I(inode)->parent_id != dir.parent_id) {
		adfs_error(sb, "parent directory changed under me! (%lx but got %lx)\n",
			   ADFS_I(inode)->parent_id, dir.parent_id);
		ret = -EIO;
		goto free_out;
	}
	obj->parent_id = inode->i_ino;
	/*
	 * '.' is handled by reserved_lookup() in fs/namei.c
	 */
	if (name->len == 2 && name->name[0] == '.' && name->name[1] == '.') {
		/*
		 * Currently unable to fill in the rest of 'obj',
		 * but this is better than nothing.  We need to
		 * ascend one level to find it's parent.
		 */
		obj->name_len = 0;
		obj->file_id  = obj->parent_id;
		goto free_out;
	}
	read_lock(&adfs_dir_lock);
	ret = ops->setpos(&dir, 0);
	if (ret)
		goto unlock_out;
	ret = -ENOENT;
	/* linear scan of the directory entries */
	while (ops->getnext(&dir, obj) == 0) {
		if (adfs_match(name, obj)) {
			ret = 0;
			break;
		}
	}
unlock_out:
	read_unlock(&adfs_dir_lock);
free_out:
	ops->free(&dir);
out:
	return ret;
}
/* Directory file operations: directories are read via readdir only. */
const struct file_operations adfs_dir_operations = {
	.read		= generic_read_dir,
	.llseek		= generic_file_llseek,
	.readdir	= adfs_readdir,
	.fsync		= generic_file_fsync,
};
/*
 * d_hash: hash the name case-insensitively, truncated in place to the
 * filesystem's maximum name length.  Truncating qstr->len here avoids
 * any custom length handling in the compare path.
 */
static int
adfs_hash(const struct dentry *parent, const struct inode *inode,
		struct qstr *qstr)
{
	const unsigned int name_len = ADFS_SB(parent->d_sb)->s_namelen;
	const unsigned char *name = qstr->name;
	unsigned long hash = init_name_hash();
	unsigned int i;

	/* names already within the limit use the default hashing */
	if (qstr->len < name_len)
		return 0;

	/*
	 * Truncate the name in place, avoids
	 * having to define a compare function.
	 */
	qstr->len = name_len;

	for (i = 0; i < name_len; i++) {
		char c = *name++;

		if (c >= 'A' && c <= 'Z')
			c += 'a' - 'A';
		hash = partial_name_hash(c, hash);
	}
	qstr->hash = end_name_hash(hash);
	return 0;
}
/*
* Compare two names, taking note of the name length
* requirements of the underlying filesystem.
*/
static int
adfs_compare(const struct dentry *parent, const struct inode *pinode,
const struct dentry *dentry, const struct inode *inode,
unsigned int len, const char *str, const struct qstr *name)
{
int i;
if (len != name->len)
return 1;
for (i = 0; i < name->len; i++) {
char a, b;
a = str[i];
b = name->name[i];
if (a >= 'A' && a <= 'Z')
a += 'a' - 'A';
if (b >= 'A' && b <= 'Z')
b += 'a' - 'A';
if (a != b)
return 1;
}
return 0;
}
/* dcache callbacks implementing case-insensitive, length-limited names. */
const struct dentry_operations adfs_dentry_operations = {
	.d_hash		= adfs_hash,
	.d_compare	= adfs_compare,
};
/*
 * inode lookup: resolve @dentry in directory @dir.  A failed lookup
 * still d_add()s a negative dentry (inode == NULL); -ENOENT maps to a
 * negative dentry rather than an error pointer via ERR_PTR(0)?  No:
 * on lookup failure 'error' is returned as ERR_PTR(error).
 */
static struct dentry *
adfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	struct inode *inode = NULL;
	struct object_info obj;
	int error;
	error = adfs_dir_lookup_byname(dir, &dentry->d_name, &obj);
	if (error == 0) {
		error = -EACCES;
		/*
		 * This only returns NULL if get_empty_inode
		 * fails.
		 */
		inode = adfs_iget(dir->i_sb, &obj);
		if (inode)
			error = 0;
	}
	d_add(dentry, inode);
	return ERR_PTR(error);
}
/*
 * directories can handle most operations...
 */
const struct inode_operations adfs_dir_inode_operations = {
	.lookup		= adfs_lookup,
	.setattr	= adfs_notify_change,
};
| gpl-2.0 |
dimon2242/Neuro_kernel | arch/arm/mach-exynos4/dev-ahci.c | 2653 | 7312 | /* linux/arch/arm/mach-exynos4/dev-ahci.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* EXYNOS4 - AHCI support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/ahci_platform.h>
#include <plat/cpu.h>
#include <mach/irqs.h>
#include <mach/map.h>
#include <mach/regs-pmu.h>
/* PHY Control Register */
#define SATA_CTRL0 0x0
/* PHY Link Control Register */
#define SATA_CTRL1 0x4
/* PHY Status Register */
#define SATA_PHY_STATUS 0x8
#define SATA_CTRL0_RX_DATA_VALID(x) (x << 27)
#define SATA_CTRL0_SPEED_MODE (1 << 26)
#define SATA_CTRL0_M_PHY_CAL (1 << 19)
#define SATA_CTRL0_PHY_CMU_RST_N (1 << 10)
#define SATA_CTRL0_M_PHY_LN_RST_N (1 << 9)
#define SATA_CTRL0_PHY_POR_N (1 << 8)
#define SATA_CTRL1_RST_PMALIVE_N (1 << 8)
#define SATA_CTRL1_RST_RXOOB_N (1 << 7)
#define SATA_CTRL1_RST_RX_N (1 << 6)
#define SATA_CTRL1_RST_TX_N (1 << 5)
#define SATA_PHY_STATUS_CMU_OK (1 << 18)
#define SATA_PHY_STATUS_LANE_OK (1 << 16)
#define LANE0 0x200
#define COM_LANE 0xA00
#define HOST_PORTS_IMPL 0xC
#define SCLK_SATA_FREQ (67 * MHZ)
static void __iomem *phy_base, *phy_ctrl;
/* One PHY tuning entry: register index and the value to write. */
struct phy_reg {
	u8 reg;
	u8 val;
};
/* SATA PHY setup */
/* SATA PHY setup */
/* Vendor-provided register/value tuning tables for the three PHY
 * blocks: clock-management unit, per-lane, and common-lane.  Applied
 * verbatim by ahci_phy_init(); the repeated 0x87 writes in the
 * common-lane table are in the original vendor sequence. */
static const struct phy_reg exynos4_sataphy_cmu[] = {
	{ 0x00, 0x06 }, { 0x02, 0x80 }, { 0x22, 0xa0 }, { 0x23, 0x42 },
	{ 0x2e, 0x04 }, { 0x2f, 0x50 }, { 0x30, 0x70 }, { 0x31, 0x02 },
	{ 0x32, 0x25 }, { 0x33, 0x40 }, { 0x34, 0x01 }, { 0x35, 0x40 },
	{ 0x61, 0x2e }, { 0x63, 0x5e }, { 0x65, 0x42 }, { 0x66, 0xd1 },
	{ 0x67, 0x20 }, { 0x68, 0x28 }, { 0x69, 0x78 }, { 0x6a, 0x04 },
	{ 0x6b, 0xc8 }, { 0x6c, 0x06 },
};
static const struct phy_reg exynos4_sataphy_lane[] = {
	{ 0x00, 0x02 }, { 0x05, 0x10 }, { 0x06, 0x84 }, { 0x07, 0x04 },
	{ 0x08, 0xe0 }, { 0x10, 0x23 }, { 0x13, 0x05 }, { 0x14, 0x30 },
	{ 0x15, 0x00 }, { 0x17, 0x70 }, { 0x18, 0xf2 }, { 0x19, 0x1e },
	{ 0x1a, 0x18 }, { 0x1b, 0x0d }, { 0x1c, 0x08 }, { 0x50, 0x60 },
	{ 0x51, 0x0f },
};
static const struct phy_reg exynos4_sataphy_comlane[] = {
	{ 0x01, 0x20 }, { 0x03, 0x40 }, { 0x04, 0x3c }, { 0x05, 0x7d },
	{ 0x06, 0x1d }, { 0x07, 0xcf }, { 0x08, 0x05 }, { 0x09, 0x63 },
	{ 0x0a, 0x29 }, { 0x0b, 0xc4 }, { 0x0c, 0x01 }, { 0x0d, 0x03 },
	{ 0x0e, 0x28 }, { 0x0f, 0x98 }, { 0x10, 0x19 }, { 0x13, 0x80 },
	{ 0x14, 0xf0 }, { 0x15, 0xd0 }, { 0x39, 0xa0 }, { 0x3a, 0xa0 },
	{ 0x3b, 0xa0 }, { 0x3c, 0xa0 }, { 0x3d, 0xa0 }, { 0x3e, 0xa0 },
	{ 0x3f, 0xa0 }, { 0x40, 0x42 }, { 0x42, 0x80 }, { 0x43, 0x58 },
	{ 0x45, 0x44 }, { 0x46, 0x5c }, { 0x47, 0x86 }, { 0x48, 0x8d },
	{ 0x49, 0xd0 }, { 0x4a, 0x09 }, { 0x4b, 0x90 }, { 0x4c, 0x07 },
	{ 0x4d, 0x40 }, { 0x51, 0x20 }, { 0x52, 0x32 }, { 0x7f, 0xd8 },
	{ 0x80, 0x1a }, { 0x81, 0xff }, { 0x82, 0x11 }, { 0x83, 0x00 },
	{ 0x87, 0xf0 }, { 0x87, 0xff }, { 0x87, 0xff }, { 0x87, 0xff },
	{ 0x87, 0xff }, { 0x8c, 0x1c }, { 0x8d, 0xc2 }, { 0x8e, 0xc3 },
	{ 0x8f, 0x3f }, { 0x90, 0x0a }, { 0x96, 0xf8 },
};
/*
 * Busy-wait (with cpu_relax()) until @bit is set in the register at
 * @reg.  Returns 0 when the bit came up, -1 after a 3 second timeout.
 *
 * Fix: re-sample the register once after the deadline passes.  If this
 * thread is preempted between the readl and the time_after() check,
 * the bit may be set by the time we notice the timeout, and the
 * original code would report a spurious failure.
 */
static int wait_for_phy_ready(void __iomem *reg, unsigned long bit)
{
	unsigned long timeout;
	/* wait for maximum of 3 sec */
	timeout = jiffies + msecs_to_jiffies(3000);
	while (!(__raw_readl(reg) & bit)) {
		if (time_after(jiffies, timeout))
			return (__raw_readl(reg) & bit) ? 0 : -1;
		cpu_relax();
	}
	return 0;
}
/*
 * Program the SATA PHY: load the vendor tuning tables, then release
 * the CMU and lane resets in turn, waiting for each block to report
 * ready, and finally start calibration.  Returns 0 or -EBUSY if a
 * ready bit never comes up.
 */
static int ahci_phy_init(void __iomem *mmio)
{
	int i, ctrl0;
	/* load the three tuning tables (registers are 4-byte strided) */
	for (i = 0; i < ARRAY_SIZE(exynos4_sataphy_cmu); i++)
		__raw_writeb(exynos4_sataphy_cmu[i].val,
		phy_base + (exynos4_sataphy_cmu[i].reg * 4));
	for (i = 0; i < ARRAY_SIZE(exynos4_sataphy_lane); i++)
		__raw_writeb(exynos4_sataphy_lane[i].val,
		phy_base + (LANE0 + exynos4_sataphy_lane[i].reg) * 4);
	for (i = 0; i < ARRAY_SIZE(exynos4_sataphy_comlane); i++)
		__raw_writeb(exynos4_sataphy_comlane[i].val,
		phy_base + (COM_LANE + exynos4_sataphy_comlane[i].reg) * 4);
	__raw_writeb(0x07, phy_base);
	/* release the CMU reset and wait for PLL lock */
	ctrl0 = __raw_readl(phy_ctrl + SATA_CTRL0);
	ctrl0 |= SATA_CTRL0_PHY_CMU_RST_N;
	__raw_writel(ctrl0, phy_ctrl + SATA_CTRL0);
	if (wait_for_phy_ready(phy_ctrl + SATA_PHY_STATUS,
				SATA_PHY_STATUS_CMU_OK) < 0) {
		printk(KERN_ERR "PHY CMU not ready\n");
		return -EBUSY;
	}
	__raw_writeb(0x03, phy_base + (COM_LANE * 4));
	/* release the lane reset and wait for the lane to come up */
	ctrl0 = __raw_readl(phy_ctrl + SATA_CTRL0);
	ctrl0 |= SATA_CTRL0_M_PHY_LN_RST_N;
	__raw_writel(ctrl0, phy_ctrl + SATA_CTRL0);
	if (wait_for_phy_ready(phy_ctrl + SATA_PHY_STATUS,
				SATA_PHY_STATUS_LANE_OK) < 0) {
		printk(KERN_ERR "PHY LANE not ready\n");
		return -EBUSY;
	}
	/* kick off PHY calibration */
	ctrl0 = __raw_readl(phy_ctrl + SATA_CTRL0);
	ctrl0 |= SATA_CTRL0_M_PHY_CAL;
	__raw_writel(ctrl0, phy_ctrl + SATA_CTRL0);
	return 0;
}
/*
 * Platform init hook for the AHCI controller: map the PHY register
 * windows, enable the sata/sataphy/sclk_sata clocks, power the PHY
 * block via the PMU, configure link control + speed, expose port 0,
 * and run the PHY bring-up sequence.  Returns 0 or a negative errno;
 * error labels unwind in reverse acquisition order.
 *
 * NOTE(review): on success the clk handles are intentionally kept
 * (and never clk_put) for the controller's lifetime; if
 * ahci_phy_init() fails, the clocks/mappings are not unwound — verify
 * whether that is acceptable for this platform.
 */
static int exynos4_ahci_init(struct device *dev, void __iomem *mmio)
{
	struct clk *clk_sata, *clk_sataphy, *clk_sclk_sata;
	int val, ret;
	phy_base = ioremap(EXYNOS4_PA_SATAPHY, SZ_64K);
	if (!phy_base) {
		dev_err(dev, "failed to allocate memory for SATA PHY\n");
		return -ENOMEM;
	}
	phy_ctrl = ioremap(EXYNOS4_PA_SATAPHY_CTRL, SZ_16);
	if (!phy_ctrl) {
		dev_err(dev, "failed to allocate memory for SATA PHY CTRL\n");
		ret = -ENOMEM;
		goto err1;
	}
	clk_sata = clk_get(dev, "sata");
	if (IS_ERR(clk_sata)) {
		dev_err(dev, "failed to get sata clock\n");
		ret = PTR_ERR(clk_sata);
		clk_sata = NULL;
		goto err2;
	}
	clk_enable(clk_sata);
	clk_sataphy = clk_get(dev, "sataphy");
	if (IS_ERR(clk_sataphy)) {
		dev_err(dev, "failed to get sataphy clock\n");
		ret = PTR_ERR(clk_sataphy);
		clk_sataphy = NULL;
		goto err3;
	}
	clk_enable(clk_sataphy);
	clk_sclk_sata = clk_get(dev, "sclk_sata");
	if (IS_ERR(clk_sclk_sata)) {
		dev_err(dev, "failed to get sclk_sata\n");
		ret = PTR_ERR(clk_sclk_sata);
		clk_sclk_sata = NULL;
		goto err4;
	}
	clk_enable(clk_sclk_sata);
	clk_set_rate(clk_sclk_sata, SCLK_SATA_FREQ);
	/* power the PHY block through the PMU */
	__raw_writel(S5P_PMU_SATA_PHY_CONTROL_EN, S5P_PMU_SATA_PHY_CONTROL);
	/* Enable PHY link control */
	val = SATA_CTRL1_RST_PMALIVE_N | SATA_CTRL1_RST_RXOOB_N |
		SATA_CTRL1_RST_RX_N | SATA_CTRL1_RST_TX_N;
	__raw_writel(val, phy_ctrl + SATA_CTRL1);
	/* Set communication speed as 3Gbps and enable PHY power */
	val = SATA_CTRL0_RX_DATA_VALID(3) | SATA_CTRL0_SPEED_MODE |
		SATA_CTRL0_PHY_POR_N;
	__raw_writel(val, phy_ctrl + SATA_CTRL0);
	/* Port0 is available */
	__raw_writel(0x1, mmio + HOST_PORTS_IMPL);
	return ahci_phy_init(mmio);
err4:
	clk_disable(clk_sataphy);
	clk_put(clk_sataphy);
err3:
	clk_disable(clk_sata);
	clk_put(clk_sata);
err2:
	iounmap(phy_ctrl);
err1:
	iounmap(phy_base);
	return ret;
}
/* Hook the platform-specific init into the generic ahci driver. */
static struct ahci_platform_data exynos4_ahci_pdata = {
	.init = exynos4_ahci_init,
};
/* MMIO window and interrupt for the AHCI controller. */
static struct resource exynos4_ahci_resource[] = {
	[0] = {
		.start	= EXYNOS4_PA_SATA,
		.end	= EXYNOS4_PA_SATA + SZ_64K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_SATA,
		.end	= IRQ_SATA,
		.flags	= IORESOURCE_IRQ,
	},
};
static u64 exynos4_ahci_dmamask = DMA_BIT_MASK(32);
/* The AHCI platform device itself; 32-bit DMA capable. */
struct platform_device exynos4_device_ahci = {
	.name		= "ahci",
	.id		= -1,
	.resource	= exynos4_ahci_resource,
	.num_resources	= ARRAY_SIZE(exynos4_ahci_resource),
	.dev		= {
		.platform_data		= &exynos4_ahci_pdata,
		.dma_mask		= &exynos4_ahci_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
};
| gpl-2.0 |
CyanogenMod/android_kernel_sony_msm8660 | security/selinux/nlmsgtab.c | 4189 | 6554 | /*
* Netlink message type permission tables, for user generated messages.
*
* Author: James Morris <jmorris@redhat.com>
*
* Copyright (C) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if.h>
#include <linux/netfilter_ipv4/ip_queue.h>
#include <linux/inet_diag.h>
#include <linux/xfrm.h>
#include <linux/audit.h>
#include "flask.h"
#include "av_permissions.h"
#include "security.h"
/* Maps one netlink message type to the SELinux permission required
 * to send it. */
struct nlmsg_perm {
	u16	nlmsg_type;
	u32	perm;
};
/* Per-protocol permission tables consulted by nlmsg_perm(): each maps
 * the user-generatable message types of one netlink family to the
 * read/write/readpriv/relay permission needed to send them. */
static struct nlmsg_perm nlmsg_route_perms[] =
{
	{ RTM_NEWLINK,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELLINK,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETLINK,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_SETLINK,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_NEWADDR,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELADDR,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETADDR,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_NEWROUTE,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELROUTE,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETROUTE,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_NEWNEIGH,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELNEIGH,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETNEIGH,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_NEWRULE,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELRULE,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETRULE,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_NEWQDISC,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELQDISC,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETQDISC,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_NEWTCLASS,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELTCLASS,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETTCLASS,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_NEWTFILTER,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELTFILTER,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETTFILTER,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_NEWACTION,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELACTION,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETACTION,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_NEWPREFIX,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETMULTICAST,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_GETANYCAST,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_GETNEIGHTBL,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_SETNEIGHTBL,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_NEWADDRLABEL,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELADDRLABEL,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETADDRLABEL,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_GETDCB,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_SETDCB,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
};
static struct nlmsg_perm nlmsg_firewall_perms[] =
{
	{ IPQM_MODE,		NETLINK_FIREWALL_SOCKET__NLMSG_WRITE },
	{ IPQM_VERDICT,		NETLINK_FIREWALL_SOCKET__NLMSG_WRITE },
};
static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
{
	{ TCPDIAG_GETSOCK,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
	{ DCCPDIAG_GETSOCK,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
};
static struct nlmsg_perm nlmsg_xfrm_perms[] =
{
	{ XFRM_MSG_NEWSA,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_DELSA,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_GETSA,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
	{ XFRM_MSG_NEWPOLICY,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_DELPOLICY,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_GETPOLICY,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
	{ XFRM_MSG_ALLOCSPI,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_ACQUIRE,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_EXPIRE,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_UPDPOLICY,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_UPDSA,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_POLEXPIRE,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_FLUSHSA,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_FLUSHPOLICY,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_NEWAE,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_GETAE,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
};
static struct nlmsg_perm nlmsg_audit_perms[] =
{
	{ AUDIT_GET,		NETLINK_AUDIT_SOCKET__NLMSG_READ     },
	{ AUDIT_SET,		NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
	{ AUDIT_LIST,		NETLINK_AUDIT_SOCKET__NLMSG_READPRIV },
	{ AUDIT_ADD,		NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
	{ AUDIT_DEL,		NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
	{ AUDIT_LIST_RULES,	NETLINK_AUDIT_SOCKET__NLMSG_READPRIV },
	{ AUDIT_ADD_RULE,	NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
	{ AUDIT_DEL_RULE,	NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
	{ AUDIT_USER,		NETLINK_AUDIT_SOCKET__NLMSG_RELAY    },
	{ AUDIT_SIGNAL_INFO,	NETLINK_AUDIT_SOCKET__NLMSG_READ     },
	{ AUDIT_TRIM,		NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
	{ AUDIT_MAKE_EQUIV,	NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
	{ AUDIT_TTY_GET,	NETLINK_AUDIT_SOCKET__NLMSG_READ     },
	{ AUDIT_TTY_SET,	NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT	},
};
/*
 * Linear search of a permission table for @nlmsg_type.  @tabsize is
 * the table size in bytes (callers pass sizeof(table)).  On a hit the
 * matching permission is stored through @perm and 0 is returned;
 * -EINVAL means the type is not in the table.
 */
static int nlmsg_perm(u16 nlmsg_type, u32 *perm, struct nlmsg_perm *tab, size_t tabsize)
{
	size_t nent = tabsize / sizeof(struct nlmsg_perm);
	size_t idx;

	for (idx = 0; idx < nent; idx++) {
		if (nlmsg_type != tab[idx].nlmsg_type)
			continue;
		*perm = tab[idx].perm;
		return 0;
	}
	return -EINVAL;
}
/*
 * Map a (socket security class, netlink message type) pair to the
 * permission a sender must hold.  Returns 0 with *perm set, -EINVAL
 * for an unknown message type within a handled class, or -ENOENT for
 * a class with no user-generated messages.
 */
int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
{
	int err = 0;
	switch (sclass) {
	case SECCLASS_NETLINK_ROUTE_SOCKET:
		err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms,
				 sizeof(nlmsg_route_perms));
		break;
	case SECCLASS_NETLINK_FIREWALL_SOCKET:
	case SECCLASS_NETLINK_IP6FW_SOCKET:
		err = nlmsg_perm(nlmsg_type, perm, nlmsg_firewall_perms,
				 sizeof(nlmsg_firewall_perms));
		break;
	case SECCLASS_NETLINK_TCPDIAG_SOCKET:
		err = nlmsg_perm(nlmsg_type, perm, nlmsg_tcpdiag_perms,
				 sizeof(nlmsg_tcpdiag_perms));
		break;
	case SECCLASS_NETLINK_XFRM_SOCKET:
		err = nlmsg_perm(nlmsg_type, perm, nlmsg_xfrm_perms,
				 sizeof(nlmsg_xfrm_perms));
		break;
	case SECCLASS_NETLINK_AUDIT_SOCKET:
		/* user-defined audit message ranges always relay */
		if ((nlmsg_type >= AUDIT_FIRST_USER_MSG &&
		     nlmsg_type <= AUDIT_LAST_USER_MSG) ||
		    (nlmsg_type >= AUDIT_FIRST_USER_MSG2 &&
		     nlmsg_type <= AUDIT_LAST_USER_MSG2)) {
			*perm = NETLINK_AUDIT_SOCKET__NLMSG_RELAY;
		} else {
			err = nlmsg_perm(nlmsg_type, perm, nlmsg_audit_perms,
					 sizeof(nlmsg_audit_perms));
		}
		break;
	/* No messaging from userspace, or class unknown/unhandled */
	default:
		err = -ENOENT;
		break;
	}
	return err;
}
| gpl-2.0 |
whdghks913/android_kernel_samsung_c1ktt | arch/um/kernel/physmem.c | 4701 | 5397 | /*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include "linux/bootmem.h"
#include "linux/mm.h"
#include "linux/pfn.h"
#include "asm/page.h"
#include "as-layout.h"
#include "init.h"
#include "kern.h"
#include "mem_user.h"
#include "os.h"
static int physmem_fd = -1;
/* Changed during early boot */
unsigned long high_physmem;
extern unsigned long long physmem_size;
/*
 * Allocate and initialise the struct page array covering physmem,
 * iomem and highmem.  Every page starts out zeroed and reserved with
 * an empty lru list.  Returns 0, or -ENOMEM if the bootmem allocation
 * fails.
 */
int __init init_maps(unsigned long physmem, unsigned long iomem,
		     unsigned long highmem)
{
	unsigned long phys_pages = physmem >> PAGE_SHIFT;
	unsigned long iomem_pages = iomem >> PAGE_SHIFT;
	unsigned long highmem_pages = highmem >> PAGE_SHIFT;
	unsigned long total_pages, total_len, i;
	struct page *map;

	total_pages = phys_pages + iomem_pages + highmem_pages;
	total_len = total_pages * sizeof(struct page);

	map = alloc_bootmem_low_pages(total_len);
	if (map == NULL)
		return -ENOMEM;

	/* Zero the whole array in one pass, then mark each page. */
	memset(map, 0, total_len);
	for (i = 0; i < total_pages; i++) {
		struct page *p = &map[i];

		SetPageReserved(p);
		INIT_LIST_HEAD(&p->lru);
	}

	max_mapnr = total_pages;
	return 0;
}
/*
 * Map @len bytes of "physical" memory at @phys to the virtual address
 * @virt with the requested r/w/x permissions, using the backing file
 * found by phys_mapping().  Panics on failure — a failed mapping here
 * is unrecoverable for the UML instance.
 */
void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
		int r, int w, int x)
{
	__u64 offset;
	int fd, err;
	fd = phys_mapping(phys, &offset);
	err = os_map_memory((void *) virt, fd, offset, len, r, w, x);
	if (err) {
		/* a common host-side cause worth calling out explicitly */
		if (err == -ENOMEM)
			printk(KERN_ERR "try increasing the host's "
			       "/proc/sys/vm/max_map_count to <physical "
			       "memory size>/4096\n");
		panic("map_memory(0x%lx, %d, 0x%llx, %ld, %d, %d, %d) failed, "
		      "err = %d\n", virt, fd, offset, len, r, w, x, err);
	}
}
extern int __syscall_stub_start;
/*
 * Create the backing file for "physical" memory (plus highmem), map
 * the non-reserved part of it into the kernel, seed the syscall stub
 * page, and hand the remaining pages to the bootmem allocator.
 * Exits the process on mapping failure — nothing can run without
 * physmem.
 */
void __init setup_physmem(unsigned long start, unsigned long reserve_end,
			  unsigned long len, unsigned long long highmem)
{
	unsigned long reserve = reserve_end - start;
	int pfn = PFN_UP(__pa(reserve_end));
	int delta = (len - reserve) >> PAGE_SHIFT;
	int err, offset, bootmap_size;
	physmem_fd = create_mem_file(len + highmem);
	offset = uml_reserved - uml_physmem;
	err = os_map_memory((void *) uml_reserved, physmem_fd, offset,
			    len - offset, 1, 1, 1);
	if (err < 0) {
		printf("setup_physmem - mapping %ld bytes of memory at 0x%p "
		       "failed - errno = %d\n", len - offset,
		       (void *) uml_reserved, err);
		exit(1);
	}
	/*
	 * Special kludge - This page will be mapped in to userspace processes
	 * from physmem_fd, so it needs to be written out there.
	 */
	os_seek_file(physmem_fd, __pa(&__syscall_stub_start));
	os_write_file(physmem_fd, &__syscall_stub_start, PAGE_SIZE);
	/* hand everything past the bootmem bitmap to the allocator */
	bootmap_size = init_bootmem(pfn, pfn + delta);
	free_bootmem(__pa(reserve_end) + bootmap_size,
		     len - bootmap_size - reserve);
}
/*
 * Translate a physical address into the host file descriptor and file
 * offset that back it.  Three ranges are distinguished:
 *   - below physmem_size:             the physmem file, offset == phys;
 *   - up to __pa(end_iomem):          the matching iomem region's file;
 *   - up to __pa(end_iomem)+highmem:  the physmem file again (highmem).
 * Returns the fd, or -1 if the address falls in none of the ranges
 * (the iomem list may also yield no match, leaving fd == -1).
 */
int phys_mapping(unsigned long phys, unsigned long long *offset_out)
{
	int fd = -1;

	if (phys < physmem_size) {
		fd = physmem_fd;
		*offset_out = phys;
	}
	else if (phys < __pa(end_iomem)) {
		struct iomem_region *region = iomem_regions;

		/* Linear scan of the iomem regions set up at early boot. */
		while (region != NULL) {
			if ((phys >= region->phys) &&
			    (phys < region->phys + region->size)) {
				fd = region->fd;
				*offset_out = phys - region->phys;
				break;
			}
			region = region->next;
		}
	}
	else if (phys < __pa(end_iomem) + highmem) {
		fd = physmem_fd;
		/* Highmem lives in the physmem file after the iomem hole. */
		*offset_out = phys - iomem_size;
	}

	return fd;
}
/* Parse the "mem=<size>" command line option into physmem_size. */
static int __init uml_mem_setup(char *line, int *add)
{
	char *end;

	physmem_size = memparse(line, &end);
	return 0;
}
__uml_setup("mem=", uml_mem_setup,
"mem=<Amount of desired ram>\n"
" This controls how much \"physical\" memory the kernel allocates\n"
" for the system. The size is specified as a number followed by\n"
" one of 'k', 'K', 'm', 'M', which have the obvious meanings.\n"
" This is not related to the amount of memory in the host. It can\n"
" be more, and the excess, if it's ever used, will just be swapped out.\n"
" Example: mem=64M\n\n"
);
extern int __init parse_iomem(char *str, int *add);
__uml_setup("iomem=", parse_iomem,
"iomem=<name>,<file>\n"
" Configure <file> as an IO memory region named <name>.\n\n"
);
/*
* This list is constructed in parse_iomem and addresses filled in in
* setup_iomem, both of which run during early boot. Afterwards, it's
* unchanged.
*/
struct iomem_region *iomem_regions;
/* Initialized in parse_iomem and unchanged thereafter */
int iomem_size;
/*
 * Look up an IO memory region by driver name.  On a match, store the
 * region's size in *len_out and return its virtual base address;
 * return 0 when no region with that name exists.
 */
unsigned long find_iomem(char *driver, unsigned long *len_out)
{
	struct iomem_region *r;

	for (r = iomem_regions; r != NULL; r = r->next) {
		if (strcmp(r->driver, driver) != 0)
			continue;
		*len_out = r->size;
		return r->virt;
	}
	return 0;
}
/*
 * Map every registered iomem region into the address space just above
 * high_physmem, recording each region's virtual and physical addresses.
 * A PAGE_SIZE guard gap is left before and between regions.  Mapping
 * failures are logged but do not abort the remaining regions.
 */
static int setup_iomem(void)
{
	struct iomem_region *region = iomem_regions;
	unsigned long iomem_start = high_physmem + PAGE_SIZE;
	int err;

	while (region != NULL) {
		err = os_map_memory((void *) iomem_start, region->fd, 0,
				    region->size, 1, 1, 0);
		if (err)
			printk(KERN_ERR "Mapping iomem region for driver '%s' "
			       "failed, errno = %d\n", region->driver, -err);
		else {
			region->virt = iomem_start;
			region->phys = __pa(region->virt);
		}

		/* Advance past this region plus a one-page guard gap. */
		iomem_start += region->size + PAGE_SIZE;
		region = region->next;
	}

	return 0;
}
__initcall(setup_iomem);
| gpl-2.0 |
sjp333/android_kernel_htc_leo | arch/um/os-Linux/signal.c | 4701 | 6588 | /*
* Copyright (C) 2004 PathScale, Inc
* Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <stdlib.h>
#include <stdarg.h>
#include <errno.h>
#include <signal.h>
#include <strings.h>
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
#include "process.h"
#include "sysdep/barrier.h"
#include "sysdep/sigcontext.h"
#include "user.h"
/* Copied from linux/compiler-gcc.h since we can't include it directly */
#define barrier() __asm__ __volatile__("": : :"memory")
/* Dispatch table mapping host signal numbers to UML handlers. */
void (*sig_info[NSIG])(int, struct uml_pt_regs *) = {
	[SIGTRAP] = relay_signal,
	[SIGFPE] = relay_signal,
	[SIGILL] = relay_signal,
	[SIGWINCH] = winch,
	[SIGBUS] = bus_handler,
	[SIGSEGV] = segv_handler,
	[SIGIO] = sigio_handler,
	[SIGVTALRM] = timer_handler };
/*
 * Common signal dispatch: build a uml_pt_regs and call the handler from
 * sig_info[].  errno is saved and restored around the handler since it
 * runs in signal context.  For SIGSEGV the register and fault state is
 * copied from the host sigcontext first.
 */
static void sig_handler_common(int sig, struct sigcontext *sc)
{
	struct uml_pt_regs r;
	int save_errno = errno;

	r.is_user = 0;
	if (sig == SIGSEGV) {
		/* For segfaults, we want the data from the sigcontext. */
		copy_sc(&r, sc);
		GET_FAULTINFO_FROM_SC(r.faultinfo, sc);
	}

	/* enable signals if sig isn't IRQ signal */
	if ((sig != SIGIO) && (sig != SIGWINCH) && (sig != SIGVTALRM))
		unblock_signals();

	(*sig_info[sig])(sig, &r);

	errno = save_errno;
}
/*
* These are the asynchronous signals. SIGPROF is excluded because we want to
* be able to profile all of UML, not just the non-critical sections. If
* profiling is not thread-safe, then that is not my problem. We can disable
* profiling when SMP is enabled in that case.
*/
#define SIGIO_BIT 0
#define SIGIO_MASK (1 << SIGIO_BIT)
#define SIGVTALRM_BIT 1
#define SIGVTALRM_MASK (1 << SIGVTALRM_BIT)
static int signals_enabled;
static unsigned int signals_pending;
/*
 * Generic signal entry point.  If soft signals are disabled and this is
 * SIGIO, just record it in signals_pending for later replay by
 * unblock_signals(); otherwise run the common handler with signals
 * blocked and restore the previous enabled state afterwards.
 */
void sig_handler(int sig, struct sigcontext *sc)
{
	int enabled;

	enabled = signals_enabled;
	if (!enabled && (sig == SIGIO)) {
		signals_pending |= SIGIO_MASK;
		return;
	}

	block_signals();

	sig_handler_common(sig, sc);

	set_signals(enabled);
}
/*
 * Deliver a timer tick to the generic timer_handler().  sc may be NULL
 * when replayed from unblock_signals(), in which case no register state
 * is copied.  Signals are re-enabled before dispatch.
 */
static void real_alarm_handler(struct sigcontext *sc)
{
	struct uml_pt_regs regs;

	if (sc != NULL)
		copy_sc(&regs, sc);
	regs.is_user = 0;
	unblock_signals();
	timer_handler(SIGVTALRM, &regs);
}
/*
 * SIGVTALRM entry point.  If soft signals are disabled, record the tick
 * in signals_pending for unblock_signals() to replay; otherwise run the
 * real handler with signals blocked, then restore the previous state.
 */
void alarm_handler(int sig, struct sigcontext *sc)
{
	int enabled;

	enabled = signals_enabled;
	/*
	 * Test the snapshot rather than re-reading the global: this keeps
	 * the function consistent with sig_handler() and avoids acting on
	 * a value of signals_enabled read after the snapshot was taken.
	 */
	if (!enabled) {
		signals_pending |= SIGVTALRM_MASK;
		return;
	}

	block_signals();

	real_alarm_handler(sc);
	set_signals(enabled);
}
/*
 * Install alarm_handler() for SIGVTALRM on the alternate stack, with
 * SIGUSR1, SIGIO and SIGWINCH blocked while it runs (-1 terminates the
 * variadic mask list expected by set_handler()).
 */
void timer_init(void)
{
	set_handler(SIGVTALRM, (__sighandler_t) alarm_handler,
		    SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGWINCH, -1);
}
/*
 * Register an alternate signal stack via sigaltstack().  The usable
 * size is shrunk by sizeof(void *); panics on failure since signal
 * delivery depends on it.
 */
void set_sigstack(void *sig_stack, int size)
{
	stack_t stack = ((stack_t) { .ss_flags = 0,
				     .ss_sp = (__ptr_t) sig_stack,
				     .ss_size = size - sizeof(void *) });

	if (sigaltstack(&stack, NULL) != 0)
		panic("enabling signal stack failed, errno = %d\n", errno);
}
static void (*handlers[_NSIG])(int sig, struct sigcontext *sc);
/*
 * Top-level signal dispatcher running on the IRQ stack.  Collects the
 * bitmask of pending signals from to_irq_stack(), dispatches each via
 * handlers[], and loops while new signals arrive during stack teardown.
 * Bit 0 of the mask is reserved as the "nested interrupt" flag.
 */
void handle_signal(int sig, struct sigcontext *sc)
{
	unsigned long pending = 1UL << sig;

	do {
		int nested, bail;

		/*
		 * pending comes back with one bit set for each
		 * interrupt that arrived while setting up the stack,
		 * plus a bit for this interrupt, plus the zero bit is
		 * set if this is a nested interrupt.
		 * If bail is true, then we interrupted another
		 * handler setting up the stack.  In this case, we
		 * have to return, and the upper handler will deal
		 * with this interrupt.
		 */
		bail = to_irq_stack(&pending);
		if (bail)
			return;

		nested = pending & 1;
		pending &= ~1;

		/* Dispatch every signal bit, lowest first. */
		while ((sig = ffs(pending)) != 0){
			sig--;
			pending &= ~(1 << sig);
			(*handlers[sig])(sig, sc);
		}

		/*
		 * Again, pending comes back with a mask of signals
		 * that arrived while tearing down the stack.  If this
		 * is non-zero, we just go back, set up the stack
		 * again, and handle the new interrupts.
		 */
		if (!nested)
			pending = from_irq_stack(nested);
	} while (pending);
}
extern void hard_handler(int sig);
/*
 * Install hard_handler() for `sig` via sigaction and record the real
 * UML handler in handlers[].  The variadic tail is a -1-terminated list
 * of signals to block while the handler runs.  The signal itself is
 * then unblocked process-wide.  Panics on any sigaction/sigprocmask
 * failure.
 */
void set_handler(int sig, void (*handler)(int), int flags, ...)
{
	struct sigaction action;
	va_list ap;
	sigset_t sig_mask;
	int mask;

	/* hard_handler is the registered entry; the real handler is
	 * looked up in handlers[] later. */
	handlers[sig] = (void (*)(int, struct sigcontext *)) handler;
	action.sa_handler = hard_handler;

	sigemptyset(&action.sa_mask);

	va_start(ap, flags);
	while ((mask = va_arg(ap, int)) != -1)
		sigaddset(&action.sa_mask, mask);
	va_end(ap);

	/* Allow the SEGV handler itself to fault (nested faults). */
	if (sig == SIGSEGV)
		flags |= SA_NODEFER;

	action.sa_flags = flags;
	action.sa_restorer = NULL;
	if (sigaction(sig, &action, NULL) < 0)
		panic("sigaction failed - errno = %d\n", errno);

	sigemptyset(&sig_mask);
	sigaddset(&sig_mask, sig);
	if (sigprocmask(SIG_UNBLOCK, &sig_mask, NULL) < 0)
		panic("sigprocmask failed - errno = %d\n", errno);
}
/*
 * Unblock (on != 0) or block (on == 0) delivery of the given signal for
 * the current process.  Returns 0 on success, -errno on failure.
 */
int change_sig(int signal, int on)
{
	sigset_t set;
	int how = on ? SIG_UNBLOCK : SIG_BLOCK;

	sigemptyset(&set);
	sigaddset(&set, signal);
	return (sigprocmask(how, &set, NULL) < 0) ? -errno : 0;
}
/*
 * Disable UML soft-signal delivery: while blocked, signal handlers only
 * record their occurrence in signals_pending.
 */
void block_signals(void)
{
	signals_enabled = 0;
	/*
	 * This must return with signals disabled, so this barrier
	 * ensures that writes are flushed out before the return.
	 * This might matter if gcc figures out how to inline this and
	 * decides to shuffle this code into the caller.
	 */
	barrier();
}
/*
 * Re-enable soft-signal delivery and replay any signals that were
 * recorded in signals_pending while delivery was blocked.  The loop and
 * the exact store/load ordering (enforced by barrier()) are what make
 * the pending-signal handoff race-free — do not reorder.
 */
void unblock_signals(void)
{
	int save_pending;

	if (signals_enabled == 1)
		return;

	/*
	 * We loop because the IRQ handler returns with interrupts off.  So,
	 * interrupts may have arrived and we need to re-enable them and
	 * recheck signals_pending.
	 */
	while (1) {
		/*
		 * Save and reset save_pending after enabling signals.  This
		 * way, signals_pending won't be changed while we're reading it.
		 */
		signals_enabled = 1;

		/*
		 * Setting signals_enabled and reading signals_pending must
		 * happen in this order.
		 */
		barrier();

		save_pending = signals_pending;
		if (save_pending == 0)
			return;

		signals_pending = 0;

		/*
		 * We have pending interrupts, so disable signals, as the
		 * handlers expect them off when they are called.  They will
		 * be enabled again above.
		 */
		signals_enabled = 0;

		/*
		 * Deal with SIGIO first because the alarm handler might
		 * schedule, leaving the pending SIGIO stranded until we come
		 * back here.
		 */
		if (save_pending & SIGIO_MASK)
			sig_handler_common(SIGIO, NULL);

		if (save_pending & SIGVTALRM_MASK)
			real_alarm_handler(NULL);
	}
}
/* Return the current soft-signal enabled state (1 = enabled). */
int get_signals(void)
{
	return signals_enabled;
}
/*
 * Set the soft-signal enabled state to `enable` and return the state
 * that was in effect before the call.  A no-op when the state already
 * matches the request.
 */
int set_signals(int enable)
{
	int old;

	if (signals_enabled == enable)
		return enable;

	old = signals_enabled;
	if (enable)
		unblock_signals();
	else
		block_signals();

	return old;
}
| gpl-2.0 |
flar2/m7-GPE-5.0.1 | arch/arm/mach-pxa/irq.c | 4957 | 4480 | /*
* linux/arch/arm/mach-pxa/irq.c
*
* Generic PXA IRQ handling
*
* Author: Nicolas Pitre
* Created: Jun 15, 2001
* Copyright: MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <asm/exception.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include "generic.h"
#define IRQ_BASE io_p2v(0x40d00000)
#define ICIP (0x000)
#define ICMR (0x004)
#define ICLR (0x008)
#define ICFR (0x00c)
#define ICPR (0x010)
#define ICCR (0x014)
#define ICHP (0x018)
#define IPR(i) (((i) < 32) ? (0x01c + ((i) << 2)) : \
((i) < 64) ? (0x0b0 + (((i) - 32) << 2)) : \
(0x144 + (((i) - 64) << 2)))
#define ICHP_VAL_IRQ (1 << 31)
#define ICHP_IRQ(i) (((i) >> 16) & 0x7fff)
#define IPR_VALID (1 << 31)
#define IRQ_BIT(n) (((n) - PXA_IRQ(0)) & 0x1f)
#define MAX_INTERNAL_IRQS 128
/*
* This is for peripheral IRQs internal to the PXA chip.
*/
static int pxa_internal_irq_nr;
/* Interrupt priority (IPR) registers exist on everything but PXA25x. */
static inline int cpu_has_ipr(void)
{
	return !cpu_is_pxa25x();
}
/*
 * Return the virtual base address of interrupt-controller bank i.
 * Each bank handles 32 interrupts (physical bases listed below).
 */
static inline void __iomem *irq_base(int i)
{
	static unsigned long phys_base[] = {
		0x40d00000,
		0x40d0009c,
		0x40d00130,
	};

	return io_p2v(phys_base[i]);
}
/* Disable delivery of d->irq by clearing its bit in its bank's ICMR. */
void pxa_mask_irq(struct irq_data *d)
{
	void __iomem *base = irq_data_get_irq_chip_data(d);
	uint32_t bit = 1u << IRQ_BIT(d->irq);

	__raw_writel(__raw_readl(base + ICMR) & ~bit, base + ICMR);
}
/* Enable delivery of d->irq by setting its bit in its bank's ICMR. */
void pxa_unmask_irq(struct irq_data *d)
{
	void __iomem *base = irq_data_get_irq_chip_data(d);
	uint32_t bit = 1u << IRQ_BIT(d->irq);

	__raw_writel(__raw_readl(base + ICMR) | bit, base + ICMR);
}
/* irq_chip for the on-chip peripheral interrupts; ack == mask since the
 * controller has no separate acknowledge mechanism for level IRQs. */
static struct irq_chip pxa_internal_irq_chip = {
	.name		= "SC",
	.irq_ack	= pxa_mask_irq,
	.irq_mask	= pxa_mask_irq,
	.irq_unmask	= pxa_unmask_irq,
};
/*
 * IRQ entry for SoCs without the ICHP priority register: repeatedly
 * AND pending (ICIP) with enabled (ICMR) and dispatch the highest
 * set bit until nothing remains pending.
 */
asmlinkage void __exception_irq_entry icip_handle_irq(struct pt_regs *regs)
{
	uint32_t icip, icmr, mask;

	do {
		icip = __raw_readl(IRQ_BASE + ICIP);
		icmr = __raw_readl(IRQ_BASE + ICMR);
		mask = icip & icmr;

		if (mask == 0)
			break;

		handle_IRQ(PXA_IRQ(fls(mask) - 1), regs);
	} while (1);
}
/*
 * IRQ entry using the ICHP register, which reports the highest-priority
 * pending interrupt directly.  ICHP is read through coprocessor 6.
 */
asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs)
{
	uint32_t ichp;

	do {
		/* Read ICHP via cp6. */
		__asm__ __volatile__("mrc p6, 0, %0, c5, c0, 0\n": "=r"(ichp));

		/* Valid-IRQ bit clear means nothing left to service. */
		if ((ichp & ICHP_VAL_IRQ) == 0)
			break;

		handle_IRQ(PXA_IRQ(ICHP_IRQ(ichp)), regs);
	} while (1);
}
/*
 * Initialize the internal interrupt controller: mask everything, route
 * all sources to IRQ (not FIQ), program per-IRQ priorities where the
 * hardware has them, and register chip/handler for each IRQ.  `fn` is
 * installed as the chip's set_wake callback.
 */
void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
{
	int irq, i, n;

	BUG_ON(irq_nr > MAX_INTERNAL_IRQS);

	pxa_internal_irq_nr = irq_nr;

	/* One controller bank per 32 interrupts. */
	for (n = 0; n < irq_nr; n += 32) {
		void __iomem *base = irq_base(n >> 5);

		__raw_writel(0, base + ICMR);	/* disable all IRQs */
		__raw_writel(0, base + ICLR);	/* all IRQs are IRQ, not FIQ */
		for (i = n; (i < (n + 32)) && (i < irq_nr); i++) {
			/* initialize interrupt priority */
			if (cpu_has_ipr())
				__raw_writel(i | IPR_VALID, IRQ_BASE + IPR(i));

			irq = PXA_IRQ(i);
			irq_set_chip_and_handler(irq, &pxa_internal_irq_chip,
						 handle_level_irq);
			irq_set_chip_data(irq, base);
			set_irq_flags(irq, IRQF_VALID);
		}
	}

	/* only unmasked interrupts kick us out of idle */
	__raw_writel(1, irq_base(0) + ICCR);

	pxa_internal_irq_chip.irq_set_wake = fn;
}
#ifdef CONFIG_PM
static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32];
static unsigned long saved_ipr[MAX_INTERNAL_IRQS];
/*
 * Syscore suspend hook: save all mask registers (then mask everything)
 * and, where present, the per-IRQ priority registers.
 */
static int pxa_irq_suspend(void)
{
	int i;

	for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
		void __iomem *base = irq_base(i);

		saved_icmr[i] = __raw_readl(base + ICMR);
		__raw_writel(0, base + ICMR);
	}

	if (cpu_has_ipr()) {
		for (i = 0; i < pxa_internal_irq_nr; i++)
			saved_ipr[i] = __raw_readl(IRQ_BASE + IPR(i));
	}

	return 0;
}
/*
 * Syscore resume hook: restore the saved masks and priorities, force
 * all sources back to IRQ mode, and re-enable the idle-wake behaviour.
 */
static void pxa_irq_resume(void)
{
	int i;

	for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
		void __iomem *base = irq_base(i);

		__raw_writel(saved_icmr[i], base + ICMR);
		__raw_writel(0, base + ICLR);
	}

	if (cpu_has_ipr())
		for (i = 0; i < pxa_internal_irq_nr; i++)
			__raw_writel(saved_ipr[i], IRQ_BASE + IPR(i));

	__raw_writel(1, IRQ_BASE + ICCR);
}
#else
#define pxa_irq_suspend NULL
#define pxa_irq_resume NULL
#endif
/* System-core PM hooks (suspend/resume are NULL when !CONFIG_PM). */
struct syscore_ops pxa_irq_syscore_ops = {
	.suspend	= pxa_irq_suspend,
	.resume		= pxa_irq_resume,
};
| gpl-2.0 |
mastero9017/Blu_Spark | fs/xfs/xfs_trace.c | 5469 | 1635 | /*
* Copyright (c) 2009, Christoph Hellwig
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_mount.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_quota.h"
#include "xfs_iomap.h"
#include "xfs_aops.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_log_recover.h"
#include "xfs_inode_item.h"
/*
* We include this last to have the helpers above available for the trace
* event implementations.
*/
#define CREATE_TRACE_POINTS
#include "xfs_trace.h"
| gpl-2.0 |
drowningchild/msm-2.6.38 | drivers/gpu/drm/nouveau/nv30_fb.c | 8285 | 3258 | /*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
/*
 * Record software state for tiling region i.  addr is OR'ed with 1
 * (presumably the region-enable bit — TODO confirm against hw docs);
 * limit is the inclusive end address.  `flags` is unused here.
 */
void
nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
			 uint32_t size, uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	tile->addr = addr | 1;
	/* max(1u, ...) keeps limit sane if addr + size is 0. */
	tile->limit = max(1u, addr + size) - 1;
	tile->pitch = pitch;
}
/* Reset tiling region i to its disabled (all-zero) software state. */
void
nv30_fb_free_tile_region(struct drm_device *dev, int i)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	tile->addr = 0;
	tile->limit = 0;
	tile->pitch = 0;
}
/*
 * Read a signed 4-bit bias nibble for timing entry (k, i, j) from the
 * 0x122c register block (chipsets newer than 0x30 only; older chipsets
 * use bias 0) and return it scaled by 2.  Exact register semantics are
 * undocumented hardware behaviour — derived from register layout only.
 */
static int
calc_bias(struct drm_device *dev, int k, int i, int j)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int b = (dev_priv->chipset > 0x30 ?
		 nv_rd32(dev, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) :
		 0) & 0xf;

	/* Sign-extend the nibble, then double it. */
	return 2 * (b & 0x8 ? b - 0x10 : b);
}
/*
 * Build one 32-bit timing reference word: four bytes, each 0x80 OR a
 * bias-adjusted value clamped to [0, 0x1f].  `l` supplies the base
 * byte (selected by i); j indexes the four bias nibbles.
 */
static int
calc_ref(struct drm_device *dev, int l, int k, int i)
{
	int j, x = 0;

	for (j = 0; j < 4; j++) {
		int m = (l >> (8 * i) & 0xff) + calc_bias(dev, k, i, j);

		x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j);
	}

	return x;
}
/*
 * NV30-family framebuffer init: disable all tiling regions and, on the
 * 0x30/0x31/0x35 chipsets, program the memory timing reference
 * registers at 0x10037c/0x1003ac from the value read at 0x1003d0.
 * Always returns 0.
 */
int
nv30_fb_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	int i, j;

	pfb->num_tiles = NV10_PFB_TILE__SIZE;

	/* Turn all the tiling regions off. */
	for (i = 0; i < pfb->num_tiles; i++)
		pfb->set_tile_region(dev, i);

	/* Init the memory timing regs at 0x10037c/0x1003ac */
	if (dev_priv->chipset == 0x30 ||
	    dev_priv->chipset == 0x31 ||
	    dev_priv->chipset == 0x35) {
		/* Related to ROP count */
		int n = (dev_priv->chipset == 0x31 ? 2 : 4);
		int l = nv_rd32(dev, 0x1003d0);

		for (i = 0; i < n; i++) {
			for (j = 0; j < 3; j++)
				nv_wr32(dev, 0x10037c + 0xc * i + 0x4 * j,
					calc_ref(dev, l, 0, j));

			for (j = 0; j < 2; j++)
				nv_wr32(dev, 0x1003ac + 0x8 * i + 0x4 * j,
					calc_ref(dev, l, 1, j));
		}
	}

	return 0;
}
/* Nothing to tear down on NV30; present to satisfy the engine API. */
void
nv30_fb_takedown(struct drm_device *dev)
{
}
| gpl-2.0 |
gandalfk7/arietta_kernel3.18.1 | arch/arm/mach-footbridge/isa-rtc.c | 13661 | 1229 | /*
* arch/arm/mach-footbridge/isa-rtc.c
*
* Copyright (C) 1998 Russell King.
* Copyright (C) 1998 Phil Blundell
*
* CATS has a real-time clock, though the evaluation board doesn't.
*
* Changelog:
* 21-Mar-1998 RMK Created
* 27-Aug-1998 PJB CATS support
* 28-Dec-1998 APH Made leds optional
* 20-Jan-1999 RMK Started merge of EBSA285, CATS and NetWinder
* 16-Mar-1999 RMK More support for EBSA285-like machines with RTCs in
*/
#define RTC_PORT(x) (0x70+(x))
#define RTC_ALWAYS_BCD 0
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/bcd.h>
#include <linux/io.h>
#include "common.h"
/*
 * Probe for an MC146818-style RTC behind the ISA bridge.  Writes the
 * divider and control-B settings, then reads them back: if both stick,
 * an RTC is present and the battery-valid bit (saved from REG_D before
 * the writes) is checked.
 */
void __init isa_rtc_init(void)
{
	int reg_d, reg_b;

	/*
	 * Probe for the RTC.
	 */
	reg_d = CMOS_READ(RTC_REG_D);

	/*
	 * make sure the divider is set
	 */
	CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_REG_A);

	/*
	 * Set control reg B
	 *   (24 hour mode, update enabled)
	 */
	reg_b = CMOS_READ(RTC_REG_B) & 0x7f;
	reg_b |= 2;
	CMOS_WRITE(reg_b, RTC_REG_B);

	/* Read-back test: values must stick for a real RTC. */
	if ((CMOS_READ(RTC_REG_A) & 0x7f) == RTC_REF_CLCK_32KHZ &&
	    CMOS_READ(RTC_REG_B) == reg_b) {
		/*
		 * We have a RTC.  Check the battery
		 */
		if ((reg_d & 0x80) == 0)
			printk(KERN_WARNING "RTC: *** warning: CMOS battery bad\n");
	}
}
| gpl-2.0 |
mozilla-b2g/kernel_flatfish | arch/arm/plat-s5p/irq-eint.c | 94 | 5252 | /* linux/arch/arm/plat-s5p/irq-eint.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* S5P - IRQ EINT support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/gpio.h>
#include <asm/hardware/vic.h>
#include <plat/regs-irqtype.h>
#include <mach/map.h>
#include <plat/cpu.h>
#include <plat/pm.h>
#include <plat/gpio-cfg.h>
#include <mach/regs-gpio.h>
/* Mask an external interrupt by setting its bit in the EINT mask reg. */
static inline void s5p_irq_eint_mask(struct irq_data *data)
{
	u32 val;

	val = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
	val |= eint_irq_to_bit(data->irq);
	__raw_writel(val, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
}
/* Unmask an external interrupt by clearing its bit in the EINT mask reg. */
static void s5p_irq_eint_unmask(struct irq_data *data)
{
	u32 val;

	val = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
	val &= ~(eint_irq_to_bit(data->irq));
	__raw_writel(val, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
}
/* Acknowledge an external interrupt by writing its bit to the pend reg. */
static inline void s5p_irq_eint_ack(struct irq_data *data)
{
	__raw_writel(eint_irq_to_bit(data->irq),
		     S5P_EINT_PEND(EINT_REG_NR(data->irq)));
}
/* Combined mask+ack callback for the genirq mask_ack hook. */
static void s5p_irq_eint_maskack(struct irq_data *data)
{
	/* compiler should in-line these */
	s5p_irq_eint_mask(data);
	s5p_irq_eint_ack(data);
}
/*
 * Configure the trigger type (edge/level) of an external interrupt in
 * its EINT_CON register and switch the corresponding GPIO pin into
 * EINT mode.  Returns 0 on success or -EINVAL for unsupported types.
 */
static int s5p_irq_eint_set_type(struct irq_data *data, unsigned int type)
{
	int offs = EINT_OFFSET(data->irq);
	int shift;
	u32 ctrl, mask;
	u32 newvalue = 0;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		newvalue = S5P_IRQ_TYPE_EDGE_RISING;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
		break;

	default:
		/* Fix: terminate the log line with '\n' so following
		 * printk output is not fused onto this message. */
		printk(KERN_ERR "No such irq type %d\n", type);
		return -EINVAL;
	}

	/* Four control bits per interrupt, eight interrupts per register. */
	shift = (offs & 0x7) * 4;
	mask = 0x7 << shift;

	ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
	ctrl &= ~mask;
	ctrl |= newvalue << shift;
	__raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));

	/* Route the pin to its external-interrupt function. */
	if ((0 <= offs) && (offs < 8))
		s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);

	else if ((8 <= offs) && (offs < 16))
		s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE);

	else if ((16 <= offs) && (offs < 24))
		s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE);

	else if ((24 <= offs) && (offs < 32))
		s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE);

	else
		printk(KERN_ERR "No such irq number %d\n", offs);

	return 0;
}
/* irq_chip for EINTs 16-31 (the group-0 demuxed external interrupts). */
static struct irq_chip s5p_irq_eint = {
	.name		= "s5p-eint",
	.irq_mask	= s5p_irq_eint_mask,
	.irq_unmask	= s5p_irq_eint_unmask,
	.irq_mask_ack	= s5p_irq_eint_maskack,
	.irq_ack	= s5p_irq_eint_ack,
	.irq_set_type	= s5p_irq_eint_set_type,
#ifdef CONFIG_PM
	.irq_set_wake	= s3c_irqext_wake,
#endif
};
/* s5p_irq_demux_eint
*
* This function demuxes the IRQ from the group0 external interrupts,
* from EINTs 16 to 31. It is designed to be inlined into the specific
* handler s5p_irq_demux_eintX_Y.
*
* Each EINT pend/mask registers handle eight of them.
*/
/* s5p_irq_demux_eint
 *
 * This function demuxes the IRQ from the group0 external interrupts,
 * from EINTs 16 to 31.  It is designed to be inlined into the specific
 * handler s5p_irq_demux_eintX_Y.
 *
 * Each EINT pend/mask registers handle eight of them.
 */
static inline void s5p_irq_demux_eint(unsigned int start)
{
	u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
	u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));
	unsigned int irq;

	/* Only consider unmasked sources; the register holds eight bits. */
	status &= ~mask;
	status &= 0xff;

	/* Dispatch each pending bit, highest first. */
	while (status) {
		irq = fls(status) - 1;
		generic_handle_irq(irq + start);
		status &= ~(1 << irq);
	}
}
/* Chained handler for the combined EINT16-31 line: demux both banks. */
static void s5p_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
	s5p_irq_demux_eint(IRQ_EINT(16));
	s5p_irq_demux_eint(IRQ_EINT(24));
}
/* Mask a VIC-routed EINT (0-15): mask at the EINT level, then at the VIC. */
static inline void s5p_irq_vic_eint_mask(struct irq_data *data)
{
	void __iomem *base = irq_data_get_irq_chip_data(data);

	s5p_irq_eint_mask(data);
	writel(1 << EINT_OFFSET(data->irq), base + VIC_INT_ENABLE_CLEAR);
}
/* Unmask a VIC-routed EINT (0-15): unmask at the EINT level and the VIC. */
static void s5p_irq_vic_eint_unmask(struct irq_data *data)
{
	void __iomem *base = irq_data_get_irq_chip_data(data);

	s5p_irq_eint_unmask(data);
	writel(1 << EINT_OFFSET(data->irq), base + VIC_INT_ENABLE);
}
/* Acknowledge a VIC-routed EINT by writing its bit to the EINT pend reg. */
static inline void s5p_irq_vic_eint_ack(struct irq_data *data)
{
	__raw_writel(eint_irq_to_bit(data->irq),
		     S5P_EINT_PEND(EINT_REG_NR(data->irq)));
}
/* Combined mask+ack for VIC-routed EINTs. */
static void s5p_irq_vic_eint_maskack(struct irq_data *data)
{
	s5p_irq_vic_eint_mask(data);
	s5p_irq_vic_eint_ack(data);
}
/* irq_chip for EINTs 0-15, which are routed through the VIC directly. */
static struct irq_chip s5p_irq_vic_eint = {
	.name		= "s5p_vic_eint",
	.irq_mask	= s5p_irq_vic_eint_mask,
	.irq_unmask	= s5p_irq_vic_eint_unmask,
	.irq_mask_ack	= s5p_irq_vic_eint_maskack,
	.irq_ack	= s5p_irq_vic_eint_ack,
	.irq_set_type	= s5p_irq_eint_set_type,
#ifdef CONFIG_PM
	.irq_set_wake	= s3c_irqext_wake,
#endif
};
/*
 * Register the external interrupt infrastructure: EINTs 0-15 get the
 * VIC-backed chip, EINTs 16-31 get the demuxed chip behind the chained
 * IRQ_EINT16_31 handler.
 */
int __init s5p_init_irq_eint(void)
{
	int irq;

	for (irq = IRQ_EINT(0); irq <= IRQ_EINT(15); irq++)
		irq_set_chip(irq, &s5p_irq_vic_eint);

	for (irq = IRQ_EINT(16); irq <= IRQ_EINT(31); irq++) {
		irq_set_chip_and_handler(irq, &s5p_irq_eint, handle_level_irq);
		set_irq_flags(irq, IRQF_VALID);
	}

	irq_set_chained_handler(IRQ_EINT16_31, s5p_irq_demux_eint16_31);
	return 0;
}
arch_initcall(s5p_init_irq_eint);
| gpl-2.0 |
shigio/ctags | Units/parser-fortran.r/bug877956.f90.d/input.f90 | 94 | 1782 | ! Bugs item #877956, was opened at 2004-01-15 17:59
! Message generated for change (Tracker Item Submitted) made by Item Submitter
! You can respond by visiting:
! https://sourceforge.net/tracker/?func=detail&atid=106556&aid=877956&group_id=6556
!
! Category: None
! Group: None
! Status: Open
! Resolution: None
! Priority: 5
! Submitted By: Randy Hood (randy762)
! Assigned to: Nobody/Anonymous (nobody)
! Summary: Broken Fortran variable listing after =-1
!
! Initial Comment:
! When I run ctags v5.5.2 on Redhat Linux 9 with the command
!
! ctags --Fortran-kinds=v -x test.f90
!
! where test.f90 is
! ----------------------------------
PROGRAM test
IMPLICIT NONE
INTEGER :: cm1 =-1, c2 = 2
END PROGRAM test
! -------------------------------------
!
! I only get this one line of output
!
! cm1 variable 4 test.f90 INTEGER :: cm1 =-1, c2 = 2
!
! If I change one line of test.f90 so that it is now
! ----------------------------------------
PROGRAM test
IMPLICIT NONE
INTEGER :: cm1 = -1, c2 = 2
END PROGRAM test
! -----------------------------------------
! and run the command
!
! ctags --Fortran-kinds=v -x test.f90
!
! I get this correct output
!
! c2 variable 4 test.f90 INTEGER :: cm1 = -1, c2 = 2
! cm1 variable 4 test.f90 INTEGER :: cm1 = -1, c2 = 2
!
! ----------------------------------------------------------------------
! You can respond by visiting:
! https://sourceforge.net/tracker/?func=detail&atid=106556&aid=877956&group_id=6556
| gpl-2.0 |
ajaragoneses/GTI_VLC | src/android/dirs.c | 94 | 1892 | /*****************************************************************************
* dirs.c: Android directories configuration
*****************************************************************************
* Copyright © 2012 Rafaël Carré
*
* Authors: Rafaël Carré <funman@videolanorg>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
*****************************************************************************/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <vlc_common.h>
#include "config/configuration.h"
#include <string.h>
/*
 * Return a heap-allocated path for the requested Android directory
 * category, or NULL when the category has no fixed location on this
 * platform.  The caller owns (and frees) the returned string.
 */
char *config_GetUserDir (vlc_userdir_t type)
{
    switch (type)
    {
        case VLC_DATA_DIR:
            return strdup("/sdcard/Android/data/org.videolan.vlc");
        case VLC_CACHE_DIR:
            return strdup("/sdcard/Android/data/org.videolan.vlc/cache");
        default:
            /* HOME/CONFIG and all XDG-style media dirs: unsupported. */
            return NULL;
    }
}
| gpl-2.0 |
bbelos/LS970_Kernel | arch/arm/mach-msm/qdsp6v2/ultrasound/q6usm.c | 94 | 29422 | /* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/msm_audio.h>
#include <sound/apr_audio.h>
#include "q6usm.h"
/* The driver version*/
#define DRV_VERSION "1.2"
#define SESSION_MAX 0x02 /* aDSP:USM limit */
#define READDONE_IDX_STATUS 0
#define READDONE_IDX_BUFFER 1
#define READDONE_IDX_SIZE 2
#define READDONE_IDX_OFFSET 3
#define READDONE_IDX_MSW_TS 4
#define READDONE_IDX_LSW_TS 5
#define READDONE_IDX_FLAGS 6
#define READDONE_IDX_NUMFRAMES 7
#define READDONE_IDX_ID 8
#define WRITEDONE_IDX_STATUS 0
/* Standard timeout in the asynchronous ops */
#define Q6USM_TIMEOUT_JIFFIES (1*HZ) /* 1 sec */
static DEFINE_MUTEX(session_lock);
static struct us_client *session[SESSION_MAX];
static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv);
static int32_t q6usm_callback(struct apr_client_data *data, void *priv);
static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr,
uint32_t pkt_size, bool cmd_flg);
/* State for the shared APR memory-map port used by all USM sessions. */
struct usm_mmap {
	atomic_t ref_cnt;	/* number of clients sharing the port */
	atomic_t cmd_state;	/* nonzero while a command is in flight */
	wait_queue_head_t cmd_wait;	/* woken on command completion */
	void *apr;		/* APR handle for the common port */
};
/*
 * Claim a free slot in the session[] table for this client under
 * session_lock.  Returns a 1-based session id (0 is reserved by the
 * DSP) or -ENOMEM when all SESSION_MAX slots are taken.
 */
static int q6usm_session_alloc(struct us_client *usc)
{
	int ind = 0;

	mutex_lock(&session_lock);
	for (ind = 0; ind < SESSION_MAX; ++ind) {
		if (!session[ind]) {
			session[ind] = usc;
			mutex_unlock(&session_lock);
			++ind; /* session id: 0 reserved */
			pr_debug("%s: session[%d] was allocated\n",
				 __func__, ind);
			return ind;
		}
	}
	mutex_unlock(&session_lock);
	return -ENOMEM;
}
/*
 * Release this client's slot in the session[] table.  The stored id is
 * 1-based (see q6usm_session_alloc), so it is decremented back to the
 * array index; out-of-range indices are silently ignored.
 */
static void q6usm_session_free(struct us_client *usc)
{
	/* Session index was incremented during allocation */
	uint16_t ind = (uint16_t)usc->session - 1;

	pr_debug("%s: to free session[%d]\n", __func__, ind);

	if (ind < SESSION_MAX) {
		mutex_lock(&session_lock);
		session[ind] = 0;
		mutex_unlock(&session_lock);
	}
}
/*
 * Free the DMA buffer of one port (dir is IN or OUT) of a client:
 * unmap it from the DSP, release the coherent memory and clear the
 * port bookkeeping.  A port with no buffer is a successful no-op.
 * Returns 0 or -EINVAL on bad arguments.
 */
int q6usm_us_client_buf_free(unsigned int dir,
			     struct us_client *usc)
{
	struct us_port_data *port;
	int rc = 0;
	uint32_t size = 0;

	if ((usc == NULL) ||
	    ((dir != IN) && (dir != OUT)))
		return -EINVAL;

	mutex_lock(&usc->cmd_lock);
	port = &usc->port[dir];
	if (port == NULL) {
		mutex_unlock(&usc->cmd_lock);
		return -EINVAL;
	}

	if (port->data == NULL) {
		mutex_unlock(&usc->cmd_lock);
		return 0;
	}

	/* Detach the buffer from the DSP before freeing it. */
	rc = q6usm_memory_unmap(usc, port->phys, dir);
	if (rc)
		pr_err("%s: CMD Memory_unmap* failed\n", __func__);

	pr_debug("%s: data[%p]phys[%p][%p]\n", __func__,
		 (void *)port->data, (void *)port->phys, (void *)&port->phys);

	size = port->buf_size * port->buf_cnt;
	dma_free_coherent(NULL, size, port->data, port->phys);
	port->data = NULL;
	port->phys = 0;
	port->buf_size = 0;
	port->buf_cnt = 0;

	mutex_unlock(&usc->cmd_lock);
	return 0;
}
/*
 * Tear down a client: free both port buffers, release the session slot,
 * deregister its APR handle, and drop the shared mmap-port reference —
 * the common port itself is deregistered when the last client goes.
 */
void q6usm_us_client_free(struct us_client *usc)
{
	int loopcnt = 0;
	struct us_port_data *port;

	if ((usc == NULL) ||
	    !(usc->session))
		return;

	for (loopcnt = 0; loopcnt <= OUT; ++loopcnt) {
		port = &usc->port[loopcnt];
		if (port->data == NULL)
			continue;

		pr_debug("%s: loopcnt = %d\n", __func__, loopcnt);
		q6usm_us_client_buf_free(loopcnt, usc);
	}

	q6usm_session_free(usc);
	apr_deregister(usc->apr);
	pr_debug("%s: APR De-Register\n", __func__);

	if (atomic_read(&this_mmap.ref_cnt) <= 0) {
		pr_err("%s: APR Common Port Already Closed\n", __func__);
		goto done;
	}

	atomic_dec(&this_mmap.ref_cnt);
	if (atomic_read(&this_mmap.ref_cnt) == 0) {
		apr_deregister(this_mmap.apr);
		pr_debug("%s: APR De-Register common port\n", __func__);
	}

done:
	kfree(usc);
	pr_debug("%s:\n", __func__);
	return;
}
/*
 * Allocate and register a new USM client.
 *
 * Allocates the client object, reserves a session id, registers the
 * per-session APR port and (for the first client) the shared mmap
 * port.  CB/PRIV are stored for event delivery from q6usm_callback().
 * Returns the client on success, NULL on any failure.
 *
 * Fix: the original initialized cmd_lock/cmd_wait/port locks *after*
 * the APR registrations, but the `fail:` path calls
 * q6usm_us_client_free(), which takes cmd_lock via
 * q6usm_us_client_buf_free() — locking an uninitialized mutex.  The
 * synchronization objects are now set up before any path that can
 * reach the free helper.
 */
struct us_client *q6usm_us_client_alloc(
	void (*cb)(uint32_t, uint32_t, uint32_t *, void *),
	void *priv)
{
	struct us_client *usc;
	int n;
	int lcnt = 0;

	usc = kzalloc(sizeof(struct us_client), GFP_KERNEL);
	if (usc == NULL)
		return NULL;

	n = q6usm_session_alloc(usc);
	if (n <= 0)
		goto fail_session;
	usc->session = n;
	usc->cb = cb;
	usc->priv = priv;

	/* Initialize synchronization objects before any failure path
	 * that calls q6usm_us_client_free(). */
	init_waitqueue_head(&usc->cmd_wait);
	mutex_init(&usc->cmd_lock);
	for (lcnt = 0; lcnt <= OUT; ++lcnt) {
		mutex_init(&usc->port[lcnt].lock);
		spin_lock_init(&usc->port[lcnt].dsp_lock);
	}
	atomic_set(&usc->cmd_state, 0);

	usc->apr = apr_register("ADSP", "USM", \
				(apr_fn)q6usm_callback,\
				((usc->session) << 8 | 0x0001),\
				usc);
	if (usc->apr == NULL) {
		pr_err("%s: Registration with APR failed\n", __func__);
		goto fail;
	}
	pr_debug("%s: Registering the common port with APR\n", __func__);
	if (atomic_read(&this_mmap.ref_cnt) == 0) {
		this_mmap.apr = apr_register("ADSP", "USM",
					     (apr_fn)q6usm_mmapcallback,
					     0x0FFFFFFFF, &this_mmap);
		if (this_mmap.apr == NULL) {
			pr_err("%s: USM port registration failed\n",
			       __func__);
			goto fail;
		}
	}
	atomic_inc(&this_mmap.ref_cnt);
	return usc;
fail:
	q6usm_us_client_free(usc);
	return NULL;
fail_session:
	kfree(usc);
	return NULL;
}
int q6usm_us_client_buf_alloc(unsigned int dir,
struct us_client *usc,
unsigned int bufsz,
unsigned int bufcnt)
{
int rc = 0;
struct us_port_data *port = NULL;
unsigned int size = bufsz*bufcnt;
if ((usc == NULL) ||
((dir != IN) && (dir != OUT)) || (size == 0) ||
(usc->session <= 0 || usc->session > SESSION_MAX)) {
pr_err("%s: wrong parameters: size=%d; bufcnt=%d\n",
__func__, size, bufcnt);
return -EINVAL;
}
mutex_lock(&usc->cmd_lock);
port = &usc->port[dir];
port->data = dma_alloc_coherent(NULL, size, &(port->phys), GFP_KERNEL);
if (port->data == NULL) {
pr_err("%s: US region allocation failed\n", __func__);
mutex_unlock(&usc->cmd_lock);
return -ENOMEM;
}
port->buf_cnt = bufcnt;
port->buf_size = bufsz;
pr_debug("%s: data[%p]; phys[%p]; [%p]\n", __func__,
(void *)port->data,
(void *)port->phys,
(void *)&port->phys);
rc = q6usm_memory_map(usc, port->phys, dir, size, 1);
if (rc < 0) {
pr_err("%s: CMD Memory_map failed\n", __func__);
mutex_unlock(&usc->cmd_lock);
q6usm_us_client_buf_free(dir, usc);
} else {
mutex_unlock(&usc->cmd_lock);
rc = 0;
}
return rc;
}
static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv)
{
uint32_t token;
uint32_t *payload = data->payload;
pr_debug("%s: ptr0[0x%x]; ptr1[0x%x]; opcode[0x%x];"
"token[0x%x]; payload_s[%d]; src[%d]; dest[%d];\n",
__func__, payload[0], payload[1], data->opcode, data->token,
data->payload_size, data->src_port, data->dest_port);
if (data->opcode == APR_BASIC_RSP_RESULT) {
/* status field check */
if (payload[1]) {
pr_err("%s: wrong response[%d] on cmd [%d]\n",
__func__, payload[1], payload[0]);
} else {
token = data->token;
switch (payload[0]) {
case USM_SESSION_CMD_MEMORY_MAP:
case USM_SESSION_CMD_MEMORY_UNMAP:
pr_debug("%s: cmd[0x%x]; result[0x%x]\n",
__func__, payload[0], payload[1]);
if (atomic_read(&this_mmap.cmd_state)) {
atomic_set(&this_mmap.cmd_state, 0);
wake_up(&this_mmap.cmd_wait);
}
break;
default:
pr_debug("%s: wrong command[0x%x]\n",
__func__, payload[0]);
break;
}
}
}
return 0;
}
/*
 * Per-session APR callback.  Handles command acknowledgements
 * (APR_BASIC_RSP_RESULT), READ_DONE/WRITE_DONE ring-buffer events and
 * signal-detect results; forwards events to the client's callback.
 * PRIV is the us_client registered in q6usm_us_client_alloc().
 */
static int32_t q6usm_callback(struct apr_client_data *data, void *priv)
{
	struct us_client *usc = (struct us_client *)priv;
	unsigned long dsp_flags;
	uint32_t *payload = data->payload;
	uint32_t token = data->token;

	if (usc == NULL) {
		pr_err("%s: client info is NULL\n", __func__);
		return -EINVAL;
	}

	if (data->opcode == APR_BASIC_RSP_RESULT) {
		/* status field check */
		if (payload[1]) {
			pr_err("%s: wrong response[%d] on cmd [%d]\n",
			       __func__, payload[1], payload[0]);
			if (usc->cb)
				usc->cb(data->opcode, token,
					(uint32_t *)data->payload, usc->priv);
		} else {
			switch (payload[0]) {
			case USM_SESSION_CMD_RUN:
			case USM_STREAM_CMD_CLOSE:
				/* RUN/CLOSE acks must carry the session id
				 * as token; otherwise discard the ack. */
				if (token != usc->session) {
					pr_err("%s: wrong token[%d]",
					       __func__, token);
					break;
				}
				/* deliberate fallthrough: valid RUN/CLOSE
				 * acks wake the waiter like the rest. */
			case USM_STREAM_CMD_OPEN_READ:
			case USM_STREAM_CMD_OPEN_WRITE:
			case USM_STREAM_CMD_SET_ENC_PARAM:
			case USM_DATA_CMD_MEDIA_FORMAT_UPDATE:
			case USM_SESSION_CMD_SIGNAL_DETECT_MODE:
				/* Wake whichever q6usm_* call is blocked in
				 * wait_event_timeout() on cmd_state. */
				if (atomic_read(&usc->cmd_state)) {
					atomic_set(&usc->cmd_state, 0);
					wake_up(&usc->cmd_wait);
				}
				if (usc->cb)
					usc->cb(data->opcode, token,
						(uint32_t *)data->payload,
						usc->priv);
				break;
			default:
				break;
			}
		}
		return 0;
	}

	switch (data->opcode) {
	case USM_DATA_EVENT_READ_DONE: {
		/* DSP finished filling an uplink buffer.  Validate the
		 * token against the expected ring position under
		 * dsp_lock, then advance the expected/dsp indices. */
		struct us_port_data *port = &usc->port[OUT];
		spin_lock_irqsave(&port->dsp_lock, dsp_flags);
		if (payload[READDONE_IDX_STATUS]) {
			pr_err("%s: wrong READDONE[%d]; token[%d]\n",
			       __func__,
			       payload[READDONE_IDX_STATUS],
			       token);
			token = USM_WRONG_TOKEN;
			spin_unlock_irqrestore(&port->dsp_lock,
					       dsp_flags);
			break;
		}
		if (port->expected_token != token) {
			u32 cpu_buf = port->cpu_buf;
			pr_err("%s: expected[%d] != token[%d]\n",
			       __func__, port->expected_token, token);
			pr_debug("%s: dsp_buf=%d; cpu_buf=%d;\n",
				 __func__, port->dsp_buf, cpu_buf);
			token = USM_WRONG_TOKEN;
			/* Poison expected_token so further data handling
			 * stops until the client resynchronizes. */
			port->expected_token = USM_WRONG_TOKEN;
			spin_unlock_irqrestore(&port->dsp_lock,
					       dsp_flags);
			break;
		} /* port->expected_token != data->token */
		port->expected_token = token + 1;
		if (port->expected_token == port->buf_cnt)
			port->expected_token = 0;
		/* gap support: if the next expected buffer is not the one
		 * the CPU is reading, jump dsp_buf ahead of the gap. */
		if (port->expected_token != port->cpu_buf) {
			port->dsp_buf = port->expected_token;
			token = port->dsp_buf; /* for callback */
		} else
			port->dsp_buf = token;
		spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
		break;
	} /* case USM_DATA_EVENT_READ_DONE */
	case USM_DATA_EVENT_WRITE_DONE: {
		/* DSP consumed a downlink buffer: advance dsp_buf with
		 * wrap-around at buf_cnt. */
		struct us_port_data *port = &usc->port[IN];
		if (payload[WRITEDONE_IDX_STATUS]) {
			pr_err("%s: wrong WRITEDONE_IDX_STATUS[%d]\n",
			       __func__,
			       payload[WRITEDONE_IDX_STATUS]);
			break;
		}
		spin_lock_irqsave(&port->dsp_lock, dsp_flags);
		port->dsp_buf = token + 1;
		if (port->dsp_buf == port->buf_cnt)
			port->dsp_buf = 0;
		spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
		break;
	} /* case USM_DATA_EVENT_WRITE_DONE */
	case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT: {
		pr_debug("%s: US detect result: result=%d",
			 __func__,
			 payload[0]);
		break;
	} /* case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT */
	default:
		return 0;
	} /* switch */

	/* Forward the (possibly token-rewritten) event to the client. */
	if (usc->cb)
		usc->cb(data->opcode, token,
			data->payload, usc->priv);
	return 0;
}
/*
 * Return the dsp_buf index for the given direction — the buffer the
 * DSP most recently reported via READ/WRITE_DONE — or 0xffffffff on
 * bad arguments.
 */
uint32_t q6usm_get_ready_data(int dir, struct us_client *usc)
{
	if (usc == NULL || (dir != IN && dir != OUT))
		return 0xffffffff;

	return usc->port[dir].dsp_buf;
}
/*
 * Map the direction's coherent DMA region into the user VMA via
 * dma_mmap_coherent().  Returns its result, or 0xffffffff when the
 * arguments are invalid.
 */
uint32_t q6usm_get_virtual_address(int dir,
				   struct us_client *usc,
				   struct vm_area_struct *vms)
{
	struct us_port_data *port;

	if (!vms || usc == NULL || (dir != IN && dir != OUT))
		return 0xffffffff;

	port = &usc->port[dir];
	return dma_mmap_coherent(NULL, vms,
				 port->data, port->phys,
				 port->buf_size * port->buf_cnt);
}
/*
 * Fill a session-scoped APR header.  When CMD_FLG is set, the token is
 * the session id and cmd_state is raised so the caller can block until
 * q6usm_callback() acknowledges the command.  Serialized on cmd_lock.
 */
static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr,
			  uint32_t pkt_size, bool cmd_flg)
{
	mutex_lock(&usc->cmd_lock);

	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
				       APR_HDR_LEN(sizeof(struct apr_hdr)),\
				       APR_PKT_VER);
	hdr->src_svc = ((struct apr_svc *)usc->apr)->id;
	hdr->src_domain = APR_DOMAIN_APPS;
	hdr->dest_svc = APR_SVC_USM;
	hdr->dest_domain = APR_DOMAIN_ADSP;
	/* Both endpoints use the session-derived port id. */
	hdr->src_port = (usc->session << 8) | 0x0001;
	hdr->dest_port = (usc->session << 8) | 0x0001;
	hdr->pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, pkt_size);

	if (cmd_flg) {
		hdr->token = usc->session;
		atomic_set(&usc->cmd_state, 1);
	}

	mutex_unlock(&usc->cmd_lock);
}
/*
 * Fill an APR header for the shared memory-map port (ports/token 0).
 * When CMD_FLG is set, raises this_mmap.cmd_state for the waiter in
 * q6usm_memory_map()/q6usm_memory_unmap().  USC is unused but kept for
 * signature symmetry with q6usm_add_hdr().
 */
static void q6usm_add_mmaphdr(struct us_client *usc, struct apr_hdr *hdr,
			      uint32_t pkt_size, bool cmd_flg)
{
	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
				       APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	hdr->src_port = 0;
	hdr->dest_port = 0;
	hdr->pkt_size = pkt_size;

	if (!cmd_flg)
		return;

	hdr->token = 0;
	atomic_set(&this_mmap.cmd_state, 1);
}
/*
 * Translate a userspace format id into the DSP format id.
 * Returns INVALID_FORMAT (and logs) for unknown inputs.
 */
static uint32_t q6usm_ext2int_format(uint32_t ext_format)
{
	switch (ext_format) {
	case FORMAT_USPS_EPOS:
		return US_POINT_EPOS_FORMAT;
	case FORMAT_USRAW:
		return US_RAW_FORMAT;
	default:
		pr_err("%s: Invalid format[%d]\n", __func__, ext_format);
		return INVALID_FORMAT;
	}
}
/*
 * Open a read (uplink) ultrasound stream on the DSP and wait for the
 * acknowledgement.  Returns 0 on success, -EINVAL on bad client or
 * format, the apr_send_pkt() error, or -ETIME on ack timeout.
 *
 * Fixes: (1) the original dereferenced usc->session in a pr_debug
 * *before* the NULL check on usc; (2) the format was validated after
 * q6usm_add_hdr(), so an invalid format returned with cmd_state stuck
 * at 1, confusing the next command's wait.
 */
int q6usm_open_read(struct us_client *usc,
		    uint32_t format)
{
	uint32_t int_format = INVALID_FORMAT;
	int rc = 0x00;
	struct usm_stream_cmd_open_read open;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: client or its apr is NULL\n", __func__);
		return -EINVAL;
	}
	pr_debug("%s: session[%d]", __func__, usc->session);

	/* Validate the format before raising cmd_state in add_hdr. */
	int_format = q6usm_ext2int_format(format);
	if (int_format == INVALID_FORMAT)
		return -EINVAL;

	q6usm_add_hdr(usc, &open.hdr, sizeof(open), true);
	open.hdr.opcode = USM_STREAM_CMD_OPEN_READ;
	open.src_endpoint = 0; /* AFE */
	open.pre_proc_top = 0; /* No preprocessing required */
	open.uMode = STREAM_PRIORITY_NORMAL;
	open.format = int_format;

	rc = apr_send_pkt(usc->apr, (uint32_t *) &open);
	if (rc < 0) {
		pr_err("%s: open failed op[0x%x]rc[%d]\n",
		       __func__, open.hdr.opcode, rc);
		goto fail_cmd;
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout, waited for OPEN_READ rc[%d]\n",
		       __func__, rc);
		goto fail_cmd;
	} else
		rc = 0;
fail_cmd:
	return rc;
}
/*
 * Configure the encoder block for a session.
 *
 * Builds a USM_STREAM_CMD_SET_ENC_PARAM packet containing the common
 * config plus opaque "transparent" parameter bytes, sends it and waits
 * for the ack.  When the u32-rounded parameter area exceeds the static
 * USM_MAX_CFG_DATA_SIZE, a larger packet is allocated dynamically.
 * Returns 0 on success, -EINVAL/-ENOMEM/-ETIME on failure.
 */
int q6usm_enc_cfg_blk(struct us_client *usc, struct us_encdec_cfg* us_cfg)
{
	uint32_t int_format = INVALID_FORMAT;
	struct usm_stream_cmd_encdec_cfg_blk enc_cfg_obj;
	struct usm_stream_cmd_encdec_cfg_blk *enc_cfg = &enc_cfg_obj;
	int rc = 0;
	uint32_t total_cfg_size =
		sizeof(struct usm_stream_cmd_encdec_cfg_blk);
	uint32_t round_params_size = 0;
	uint8_t is_allocated = 0;	/* 1 when enc_cfg came from kzalloc */

	if ((usc == NULL) || (us_cfg == NULL)) {
		pr_err("%s: wrong input", __func__);
		return -EINVAL;
	}

	int_format = q6usm_ext2int_format(us_cfg->format_id);
	if (int_format == INVALID_FORMAT) {
		pr_err("%s: wrong input format[%d]",
		       __func__, us_cfg->format_id);
		return -EINVAL;
	}

	/* Transparent configuration data is after enc_cfg */
	/* Integer number of u32s is required */
	round_params_size = ((us_cfg->params_size + 3)/4) * 4;
	if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
		/* Dynamically allocated encdec_cfg_blk is required;
		 * only the overflow past the static area is added. */
		round_params_size -= USM_MAX_CFG_DATA_SIZE;
		total_cfg_size += round_params_size;
		enc_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
		if (enc_cfg == NULL) {
			pr_err("%s: enc_cfg[%d] allocation failed\n",
			       __func__, total_cfg_size);
			return -ENOMEM;
		}
		is_allocated = 1;
	} else
		round_params_size = 0;

	q6usm_add_hdr(usc, &enc_cfg->hdr, total_cfg_size - APR_HDR_SIZE, true);
	enc_cfg->hdr.opcode = USM_STREAM_CMD_SET_ENC_PARAM;
	enc_cfg->param_id = USM_PARAM_ID_ENCDEC_ENC_CFG_BLK;
	enc_cfg->param_size = sizeof(struct usm_encode_cfg_blk)+
			      round_params_size;
	enc_cfg->enc_blk.frames_per_buf = 1;
	enc_cfg->enc_blk.format_id = int_format;
	enc_cfg->enc_blk.cfg_size = sizeof(struct usm_cfg_common)+
				    USM_MAX_CFG_DATA_SIZE +
				    round_params_size;
	memcpy(&(enc_cfg->enc_blk.cfg_common), &(us_cfg->cfg_common),
	       sizeof(struct usm_cfg_common));
	/* Transparent data copy */
	memcpy(enc_cfg->enc_blk.transp_data, us_cfg->params,
	       us_cfg->params_size);
	pr_debug("%s: cfg_size[%d], params_size[%d]\n",
		 __func__,
		 enc_cfg->enc_blk.cfg_size,
		 us_cfg->params_size);
	pr_debug("%s: params[%d,%d,%d,%d, %d,%d,%d,%d]\n",
		 __func__,
		 enc_cfg->enc_blk.transp_data[0],
		 enc_cfg->enc_blk.transp_data[1],
		 enc_cfg->enc_blk.transp_data[2],
		 enc_cfg->enc_blk.transp_data[3],
		 enc_cfg->enc_blk.transp_data[4],
		 enc_cfg->enc_blk.transp_data[5],
		 enc_cfg->enc_blk.transp_data[6],
		 enc_cfg->enc_blk.transp_data[7]
		 );
	pr_debug("%s: srate:%d, ch=%d, bps= %d; dmap:0x%x; dev_id=0x%x\n",
		 __func__, enc_cfg->enc_blk.cfg_common.sample_rate,
		 enc_cfg->enc_blk.cfg_common.ch_cfg,
		 enc_cfg->enc_blk.cfg_common.bits_per_sample,
		 enc_cfg->enc_blk.cfg_common.data_map,
		 enc_cfg->enc_blk.cfg_common.dev_id);

	rc = apr_send_pkt(usc->apr, (uint32_t *) enc_cfg);
	if (rc < 0) {
		pr_err("%s:Comamnd open failed\n", __func__);
		rc = -EINVAL;
		goto fail_cmd;
	}
	/* Block until q6usm_callback() clears cmd_state for this ack. */
	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout opcode[0x%x]\n",
		       __func__, enc_cfg->hdr.opcode);
	} else
		rc = 0;
fail_cmd:
	if (is_allocated == 1)
		kfree(enc_cfg);
	return rc;
}
/*
 * Configure the decoder media format for a session — the downlink
 * counterpart of q6usm_enc_cfg_blk(), using
 * USM_DATA_CMD_MEDIA_FORMAT_UPDATE.  Same dynamic-allocation scheme
 * for oversized transparent parameter data.
 * Returns 0 on success, -EINVAL/-ENOMEM/-ETIME on failure.
 */
int q6usm_dec_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg)
{
	uint32_t int_format = INVALID_FORMAT;
	struct usm_stream_media_format_update dec_cfg_obj;
	struct usm_stream_media_format_update *dec_cfg = &dec_cfg_obj;
	int rc = 0;
	uint32_t total_cfg_size = sizeof(struct usm_stream_media_format_update);
	uint32_t round_params_size = 0;
	uint8_t is_allocated = 0;	/* 1 when dec_cfg came from kzalloc */

	if ((usc == NULL) || (us_cfg == NULL)) {
		pr_err("%s: wrong input", __func__);
		return -EINVAL;
	}

	int_format = q6usm_ext2int_format(us_cfg->format_id);
	if (int_format == INVALID_FORMAT) {
		pr_err("%s: wrong input format[%d]",
		       __func__, us_cfg->format_id);
		return -EINVAL;
	}

	/* Transparent configuration data is after dec_cfg */
	/* Integer number of u32s is required */
	round_params_size = ((us_cfg->params_size + 3)/4) * 4;
	if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
		/* Dynamically allocated packet is required; only the
		 * overflow past the static area is added. */
		round_params_size -= USM_MAX_CFG_DATA_SIZE;
		total_cfg_size += round_params_size;
		dec_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
		if (dec_cfg == NULL) {
			pr_err("%s:dec_cfg[%d] allocation failed\n",
			       __func__, total_cfg_size);
			return -ENOMEM;
		}
		is_allocated = 1;
	} else { /* static transp_data is enough */
		round_params_size = 0;
	}

	q6usm_add_hdr(usc, &dec_cfg->hdr, total_cfg_size - APR_HDR_SIZE, true);
	dec_cfg->hdr.opcode = USM_DATA_CMD_MEDIA_FORMAT_UPDATE;
	dec_cfg->format_id = int_format;
	dec_cfg->cfg_size = sizeof(struct usm_cfg_common) +
			    USM_MAX_CFG_DATA_SIZE +
			    round_params_size;
	memcpy(&(dec_cfg->cfg_common), &(us_cfg->cfg_common),
	       sizeof(struct usm_cfg_common));
	/* Transparent data copy */
	memcpy(dec_cfg->transp_data, us_cfg->params, us_cfg->params_size);
	pr_debug("%s: cfg_size[%d], params_size[%d]; parambytes[%d,%d,%d,%d]\n",
		 __func__,
		 dec_cfg->cfg_size,
		 us_cfg->params_size,
		 dec_cfg->transp_data[0],
		 dec_cfg->transp_data[1],
		 dec_cfg->transp_data[2],
		 dec_cfg->transp_data[3]
		 );

	rc = apr_send_pkt(usc->apr, (uint32_t *) dec_cfg);
	if (rc < 0) {
		pr_err("%s:Comamnd open failed\n", __func__);
		rc = -EINVAL;
		goto fail_cmd;
	}
	/* Block until q6usm_callback() clears cmd_state for this ack. */
	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout opcode[0x%x]\n",
		       __func__, dec_cfg->hdr.opcode);
	} else
		rc = 0;
fail_cmd:
	if (is_allocated == 1)
		kfree(dec_cfg);
	return rc;
}
/*
 * Open a write (downlink) ultrasound stream on the DSP and wait for
 * the acknowledgement.  Returns 0 on success, -EINVAL on bad client or
 * format, the apr_send_pkt() error, or -ETIME on ack timeout.
 *
 * Fixes: (1) usc->session was logged before the NULL check on usc;
 * (2) format validation moved before q6usm_add_hdr() so an invalid
 * format no longer leaves cmd_state stuck at 1; (3) "OPEN_WRITR" typo
 * in the timeout message.
 */
int q6usm_open_write(struct us_client *usc,
		     uint32_t format)
{
	int rc = 0;
	uint32_t int_format = INVALID_FORMAT;
	struct usm_stream_cmd_open_write open;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	pr_debug("%s: session[%d]", __func__, usc->session);

	/* Validate the format before raising cmd_state in add_hdr. */
	int_format = q6usm_ext2int_format(format);
	if (int_format == INVALID_FORMAT) {
		pr_err("%s: wrong format[%d]", __func__, format);
		return -EINVAL;
	}

	q6usm_add_hdr(usc, &open.hdr, sizeof(open), true);
	open.hdr.opcode = USM_STREAM_CMD_OPEN_WRITE;
	open.format = int_format;

	rc = apr_send_pkt(usc->apr, (uint32_t *) &open);
	if (rc < 0) {
		pr_err("%s:open failed op[0x%x]rc[%d]\n", \
		       __func__, open.hdr.opcode, rc);
		goto fail_cmd;
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s:timeout. waited for OPEN_WRITE rc[%d]\n",
		       __func__, rc);
		goto fail_cmd;
	} else
		rc = 0;
fail_cmd:
	return rc;
}
/*
 * Start the session on the DSP (USM_SESSION_CMD_RUN) with the given
 * flags and 64-bit timestamp split into msw/lsw, then wait for the
 * acknowledgement.  Returns 0 on success, -EINVAL on bad client, the
 * apr_send_pkt() error, or -ETIME on ack timeout.
 */
int q6usm_run(struct us_client *usc, uint32_t flags,
	      uint32_t msw_ts, uint32_t lsw_ts)
{
	struct usm_stream_cmd_run run;
	int rc = 0;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}

	q6usm_add_hdr(usc, &run.hdr, sizeof(run), true);
	run.hdr.opcode = USM_SESSION_CMD_RUN;
	run.flags = flags;
	run.msw_ts = msw_ts;
	run.lsw_ts = lsw_ts;

	rc = apr_send_pkt(usc->apr, (uint32_t *) &run);
	if (rc < 0) {
		pr_err("%s: Commmand run failed[%d]\n", __func__, rc);
		return rc;
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout. waited for run success rc[%d]\n",
		       __func__, rc);
		return rc;
	}

	return 0;
}
/*
 * Ask the DSP to map a bufsz*bufcnt-byte client DMA region, using the
 * shared mmap port.  Blocks on this_mmap.cmd_wait until
 * q6usm_mmapcallback() acknowledges.  Returns 0 on success, -EINVAL
 * on missing handles, the apr_send_pkt() error, or -ETIME.
 */
int q6usm_memory_map(struct us_client *usc, uint32_t buf_add, int dir,
		     uint32_t bufsz, uint32_t bufcnt)
{
	struct usm_stream_cmd_memory_map mem_map;
	int rc = 0;

	if ((usc == NULL) || (usc->apr == NULL) || (this_mmap.apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}

	q6usm_add_mmaphdr(usc, &mem_map.hdr,
			  sizeof(struct usm_stream_cmd_memory_map), true);
	mem_map.hdr.opcode = USM_SESSION_CMD_MEMORY_MAP;
	mem_map.buf_add = buf_add;
	mem_map.buf_size = bufsz * bufcnt;
	mem_map.mempool_id = 0;

	pr_debug("%s: buf add[%x] buf_add_parameter[%x]\n",
		 __func__, mem_map.buf_add, buf_add);

	rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_map);
	if (rc < 0) {
		pr_err("%s: mem_map op[0x%x]rc[%d]\n",
		       __func__, mem_map.hdr.opcode, rc);
		return rc;
	}

	rc = wait_event_timeout(this_mmap.cmd_wait,
				(atomic_read(&this_mmap.cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (rc)
		return 0;

	pr_err("%s: timeout. waited for memory_map\n", __func__);
	return -ETIME;
}
/*
 * Ask the DSP to unmap a previously mapped client DMA region via the
 * shared mmap port and wait for the acknowledgement.  Returns 0 on
 * success, -EINVAL on missing handles, the apr_send_pkt() error, or
 * -ETIME on ack timeout.
 *
 * Fix: the timeout message said "memory_map" (copy/paste from the map
 * routine); it now correctly says "memory_unmap".
 */
int q6usm_memory_unmap(struct us_client *usc, uint32_t buf_add, int dir)
{
	struct usm_stream_cmd_memory_unmap mem_unmap;
	int rc = 0;

	if ((usc == NULL) || (usc->apr == NULL) || (this_mmap.apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}

	q6usm_add_mmaphdr(usc, &mem_unmap.hdr,
			  sizeof(struct usm_stream_cmd_memory_unmap), true);
	mem_unmap.hdr.opcode = USM_SESSION_CMD_MEMORY_UNMAP;
	mem_unmap.buf_add = buf_add;

	rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_unmap);
	if (rc < 0) {
		pr_err("%s:mem_unmap op[0x%x]rc[%d]\n",
		       __func__, mem_unmap.hdr.opcode, rc);
		goto fail_cmd;
	}

	rc = wait_event_timeout(this_mmap.cmd_wait,
				(atomic_read(&this_mmap.cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout. waited for memory_unmap\n", __func__);
	} else
		rc = 0;
fail_cmd:
	return rc;
}
/*
 * Queue READ commands to the DSP for every free uplink buffer between
 * the current cpu_buf position and READ_IND (exclusive), advancing
 * cpu_buf with wrap-around.  On a send failure, cpu_buf is rolled back
 * to the buffer that failed.  Returns 0 on success, -EINVAL on bad
 * arguments, or the apr_send_pkt() error.
 */
int q6usm_read(struct us_client *usc, uint32_t read_ind)
{
	struct usm_stream_cmd_read read;
	struct us_port_data *port = NULL;
	int rc = 0;
	u32 read_counter = 0;
	u32 loop_ind = 0;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	port = &usc->port[OUT];

	/* NOTE(review): valid ring indices appear to be 0..buf_cnt-1
	 * (cpu_buf wraps at buf_cnt), so this looks like it should be
	 * `>=` rather than `>` — confirm against callers. */
	if (read_ind > port->buf_cnt) {
		pr_err("%s: wrong read_ind[%d]\n",
		       __func__, read_ind);
		return -EINVAL;
	}
	if (read_ind == port->cpu_buf) {
		/* Target equals current position: nothing to queue. */
		pr_err("%s: no free region\n", __func__);
		return 0;
	}

	if (read_ind > port->cpu_buf) { /* 1 range */
		read_counter = read_ind - port->cpu_buf;
	} else { /* 2 ranges */
		read_counter = (port->buf_cnt - port->cpu_buf) + read_ind;
	}

	q6usm_add_hdr(usc, &read.hdr, (sizeof(read) - APR_HDR_SIZE), false);
	read.hdr.opcode = USM_DATA_CMD_READ;
	read.buf_size = port->buf_size;

	for (loop_ind = 0; loop_ind < read_counter; ++loop_ind) {
		u32 temp_cpu_buf = port->cpu_buf;

		/* Physical address of the cpu_buf-th ring slot; the slot
		 * index doubles as uid and response token. */
		read.buf_add = (uint32_t)(port->phys) +
			       port->buf_size * (port->cpu_buf);
		read.uid = port->cpu_buf;
		read.hdr.token = port->cpu_buf;
		read.counter = 1;
		++(port->cpu_buf);
		if (port->cpu_buf == port->buf_cnt)
			port->cpu_buf = 0;
		rc = apr_send_pkt(usc->apr, (uint32_t *) &read);
		if (rc < 0) {
			/* Roll back the index consumed by this attempt. */
			port->cpu_buf = temp_cpu_buf;
			pr_err("%s:read op[0x%x]rc[%d]\n",
			       __func__, read.hdr.opcode, rc);
			break;
		} else
			rc = 0;
	} /* bufs loop */

	return rc;
}
/*
 * Queue WRITE commands for every filled downlink buffer from cpu_buf
 * up to WRITE_IND (exclusive), after validating that WRITE_IND lies
 * inside the free region bounded by the DSP's consume pointer
 * (dsp_buf).  On a send failure, cpu_buf is rolled back.  Returns 0 on
 * success, -EINVAL for an out-of-range target, or the apr_send_pkt()
 * error.
 */
int q6usm_write(struct us_client *usc, uint32_t write_ind)
{
	int rc = 0;
	struct usm_stream_cmd_write cmd_write;
	struct us_port_data *port = NULL;
	u32 current_dsp_buf = 0;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	port = &usc->port[IN];

	/* Snapshot dsp_buf once: the free region, caused by new dsp_buf
	 * reports from the DSP, can only grow, so a stale snapshot is
	 * conservative. */
	current_dsp_buf = port->dsp_buf;
	if (port->cpu_buf >= current_dsp_buf) {
		/* 2-part free region (wraps past the end), including the
		 * empty-buffer case; WRITE_IND must not land inside the
		 * occupied span (dsp_buf, cpu_buf]. */
		if ((write_ind <= port->cpu_buf)  &&
		    (write_ind > current_dsp_buf)) {
			pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n",
			       __func__, write_ind,
			       current_dsp_buf, port->cpu_buf);
			return -EINVAL;
		}
	} else {
		/* 1-part free region: WRITE_IND must lie strictly inside
		 * (cpu_buf, dsp_buf]. */
		if ((write_ind <= port->cpu_buf) ||
		    (write_ind > current_dsp_buf)) {
			pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n",
			       __func__, write_ind,
			       current_dsp_buf, port->cpu_buf);
			return -EINVAL;
		}
	}

	q6usm_add_hdr(usc, &cmd_write.hdr,
		      (sizeof(cmd_write) - APR_HDR_SIZE), false);
	cmd_write.hdr.opcode = USM_DATA_CMD_WRITE;
	cmd_write.buf_size = port->buf_size;

	while (port->cpu_buf != write_ind) {
		u32 temp_cpu_buf = port->cpu_buf;

		/* Physical address of the cpu_buf-th slot; the slot index
		 * doubles as uid and response token. */
		cmd_write.buf_add = (uint32_t)(port->phys) +
				    port->buf_size * (port->cpu_buf);
		cmd_write.uid = port->cpu_buf;
		cmd_write.hdr.token = port->cpu_buf;
		++(port->cpu_buf);
		if (port->cpu_buf == port->buf_cnt)
			port->cpu_buf = 0;

		rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_write);
		if (rc < 0) {
			/* Roll back the index consumed by this attempt. */
			port->cpu_buf = temp_cpu_buf;
			pr_err("%s:write op[0x%x];rc[%d];cpu_buf[%d]\n",
			       __func__, cmd_write.hdr.opcode,
			       rc, port->cpu_buf);
			break;
		}
		rc = 0;
	}

	return rc;
}
/*
 * Report whether the downlink ring is full: true when advancing
 * cpu_buf by one (with wrap) would collide with the DSP's consume
 * pointer.  *FREE_REGION receives the current dsp_buf.  Returns false
 * (without touching *FREE_REGION) on bad arguments.
 */
bool q6usm_is_write_buf_full(struct us_client *usc, uint32_t* free_region)
{
	struct us_port_data *port;
	u32 next_buf;

	if ((usc == NULL) || !free_region) {
		pr_err("%s: input data wrong\n", __func__);
		return false;
	}

	port = &usc->port[IN];
	next_buf = port->cpu_buf + 1;
	if (next_buf == port->buf_cnt)
		next_buf = 0;

	*free_region = port->dsp_buf;
	return next_buf == *free_region;
}
/*
 * Send a simple control command (currently only CMD_CLOSE) and wait
 * for the acknowledgement.  Returns 0 on success, -EINVAL on bad
 * client or unknown command, the apr_send_pkt() error, or -ETIME.
 *
 * Fix: an unknown command used to `goto fail_cmd` with rc still 0,
 * silently reporting success; it now returns -EINVAL.
 */
int q6usm_cmd(struct us_client *usc, int cmd)
{
	struct apr_hdr hdr;
	int rc = 0;
	atomic_t *state;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	q6usm_add_hdr(usc, &hdr, (sizeof(hdr) - APR_HDR_SIZE), true);
	switch (cmd) {
	case CMD_CLOSE:
		hdr.opcode = USM_STREAM_CMD_CLOSE;
		state = &usc->cmd_state;
		break;
	default:
		pr_err("%s:Invalid format[%d]\n", __func__, cmd);
		return -EINVAL;
	}

	rc = apr_send_pkt(usc->apr, (uint32_t *) &hdr);
	if (rc < 0) {
		pr_err("%s: Command 0x%x failed\n", __func__, hdr.opcode);
		goto fail_cmd;
	}

	rc = wait_event_timeout(usc->cmd_wait, (atomic_read(state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s:timeout. waited for response opcode[0x%x]\n",
		       __func__, hdr.opcode);
	} else
		rc = 0;
fail_cmd:
	return rc;
}
/*
 * Send a SIGNAL_DETECT_MODE command carrying caller-built detect_info
 * (its header is filled here) and wait for the acknowledgement.
 * Returns 0 on success, -EINVAL on bad input or send failure, -ETIME
 * on ack timeout.
 *
 * Fix: usc->apr was not validated, yet q6usm_add_hdr() dereferences it
 * and apr_send_pkt() uses it; the check now matches the other q6usm_*
 * entry points.
 */
int q6usm_set_us_detection(struct us_client *usc,
			   struct usm_session_cmd_detect_info *detect_info,
			   uint16_t detect_info_size)
{
	int rc = 0;

	if ((usc == NULL) || (usc->apr == NULL) ||
	    (detect_info_size == 0) ||
	    (detect_info == NULL)) {
		pr_err("%s: wrong input: usc=0x%p, inf_size=%d; info=0x%p",
		       __func__,
		       usc,
		       detect_info_size,
		       detect_info);
		return -EINVAL;
	}

	q6usm_add_hdr(usc, &detect_info->hdr,
		      detect_info_size - APR_HDR_SIZE, true);
	detect_info->hdr.opcode = USM_SESSION_CMD_SIGNAL_DETECT_MODE;

	rc = apr_send_pkt(usc->apr, (uint32_t *)detect_info);
	if (rc < 0) {
		pr_err("%s:Comamnd signal detect failed\n", __func__);
		return -EINVAL;
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: CMD_SIGNAL_DETECT_MODE: timeout=%d\n",
		       __func__, Q6USM_TIMEOUT_JIFFIES);
	} else
		rc = 0;

	return rc;
}
/* Module init: prepare the shared mmap-port waitqueue and clear the
 * session table before any client can be allocated. */
static int __init q6usm_init(void)
{
	pr_debug("%s\n", __func__);
	init_waitqueue_head(&this_mmap.cmd_wait);
	memset(session, 0, sizeof(session));
	return 0;
}
device_initcall(q6usm_init);
| gpl-2.0 |
einon/staging-et131x | drivers/gpu/drm/tegra/dpaux.c | 94 | 12898 | /*
* Copyright (C) 2013 NVIDIA Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_panel.h>
#include "dpaux.h"
#include "drm.h"
static DEFINE_MUTEX(dpaux_lock);
static LIST_HEAD(dpaux_list);
/* Per-instance state of a Tegra DisplayPort AUX channel controller. */
struct tegra_dpaux {
	struct drm_dp_aux aux;		/* DRM DP AUX adapter (embedded) */
	struct device *dev;
	void __iomem *regs;		/* mapped controller registers */
	int irq;
	struct tegra_output *output;	/* attached output, NULL if none */
	struct reset_control *rst;
	struct clk *clk_parent;		/* 270 MHz parent clock */
	struct clk *clk;		/* module clock */
	struct regulator *vdd;		/* panel/aux supply */
	struct completion complete;	/* signalled on AUX_DONE IRQ */
	struct work_struct work;	/* hotplug event propagation */
	struct list_head list;		/* entry in global dpaux_list */
};
/* Recover the tegra_dpaux from its embedded drm_dp_aux. */
static inline struct tegra_dpaux *to_dpaux(struct drm_dp_aux *aux)
{
	return container_of(aux, struct tegra_dpaux, aux);
}
/* Recover the tegra_dpaux from its embedded hotplug work item. */
static inline struct tegra_dpaux *work_to_dpaux(struct work_struct *work)
{
	return container_of(work, struct tegra_dpaux, work);
}
/* Read a controller register; OFFSET is a word index (scaled by 4). */
static inline unsigned long tegra_dpaux_readl(struct tegra_dpaux *dpaux,
					      unsigned long offset)
{
	return readl(dpaux->regs + (offset << 2));
}
/* Write a controller register; OFFSET is a word index (scaled by 4). */
static inline void tegra_dpaux_writel(struct tegra_dpaux *dpaux,
				      unsigned long value,
				      unsigned long offset)
{
	writel(value, dpaux->regs + (offset << 2));
}
/*
 * Copy up to 16 bytes into the AUX transmit FIFO, packing them
 * little-endian into consecutive 32-bit AUXDATA_WRITE registers.
 */
static void tegra_dpaux_write_fifo(struct tegra_dpaux *dpaux, const u8 *buffer,
				   size_t size)
{
	unsigned long reg = DPAUX_DP_AUXDATA_WRITE(0);
	size_t done;

	for (done = 0; done < size; done += 4) {
		size_t chunk = min_t(size_t, size - done, 4);
		unsigned long word = 0;
		size_t byte;

		/* byte 0 goes into bits 7:0, byte 1 into 15:8, ... */
		for (byte = 0; byte < chunk; byte++)
			word |= buffer[done + byte] << (byte * 8);

		tegra_dpaux_writel(dpaux, word, reg++);
	}
}
/*
 * Copy up to 16 bytes out of the AUX receive FIFO, unpacking each
 * 32-bit AUXDATA_READ register little-endian into the buffer.
 */
static void tegra_dpaux_read_fifo(struct tegra_dpaux *dpaux, u8 *buffer,
				  size_t size)
{
	unsigned long reg = DPAUX_DP_AUXDATA_READ(0);
	size_t done;

	for (done = 0; done < size; done += 4) {
		size_t chunk = min_t(size_t, size - done, 4);
		unsigned long word = tegra_dpaux_readl(dpaux, reg++);
		size_t byte;

		/* bits 7:0 become byte 0, bits 15:8 byte 1, ... */
		for (byte = 0; byte < chunk; byte++)
			buffer[done + byte] = word >> (byte * 8);
	}
}
/*
 * drm_dp_aux.transfer implementation: perform one native or I2C-over-
 * AUX transaction.  Returns the number of bytes transferred, or a
 * negative error (-EINVAL for unsupported requests/sizes, -ETIMEDOUT,
 * -EIO on sink-reported errors).
 */
static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
				    struct drm_dp_aux_msg *msg)
{
	unsigned long timeout = msecs_to_jiffies(250);
	struct tegra_dpaux *dpaux = to_dpaux(aux);
	unsigned long status;
	ssize_t ret = 0;
	u32 value;

	/* Tegra has 4x4 byte DP AUX transmit and receive FIFOs. */
	if (msg->size > 16)
		return -EINVAL;

	/*
	 * Allow zero-sized messages only for I2C, in which case they specify
	 * address-only transactions.
	 */
	if (msg->size < 1) {
		switch (msg->request & ~DP_AUX_I2C_MOT) {
		case DP_AUX_I2C_WRITE:
		case DP_AUX_I2C_READ:
			value = DPAUX_DP_AUXCTL_CMD_ADDRESS_ONLY;
			break;

		default:
			return -EINVAL;
		}
	} else {
		/* For non-zero-sized messages, set the CMDLEN field. */
		value = DPAUX_DP_AUXCTL_CMDLEN(msg->size - 1);
	}

	/* Select the command encoding; I2C requests additionally depend
	 * on the MOT (middle-of-transaction) flag. */
	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_I2C_WRITE:
		if (msg->request & DP_AUX_I2C_MOT)
			value |= DPAUX_DP_AUXCTL_CMD_MOT_WR;
		else
			value |= DPAUX_DP_AUXCTL_CMD_I2C_WR;

		break;

	case DP_AUX_I2C_READ:
		if (msg->request & DP_AUX_I2C_MOT)
			value |= DPAUX_DP_AUXCTL_CMD_MOT_RD;
		else
			value |= DPAUX_DP_AUXCTL_CMD_I2C_RD;

		break;

	case DP_AUX_I2C_STATUS:
		if (msg->request & DP_AUX_I2C_MOT)
			value |= DPAUX_DP_AUXCTL_CMD_MOT_RQ;
		else
			value |= DPAUX_DP_AUXCTL_CMD_I2C_RQ;

		break;

	case DP_AUX_NATIVE_WRITE:
		value |= DPAUX_DP_AUXCTL_CMD_AUX_WR;
		break;

	case DP_AUX_NATIVE_READ:
		value |= DPAUX_DP_AUXCTL_CMD_AUX_RD;
		break;

	default:
		return -EINVAL;
	}

	tegra_dpaux_writel(dpaux, msg->address, DPAUX_DP_AUXADDR);
	tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXCTL);

	/* Writes (and address-only transfers) preload the TX FIFO. */
	if ((msg->request & DP_AUX_I2C_READ) == 0) {
		tegra_dpaux_write_fifo(dpaux, msg->buffer, msg->size);
		ret = msg->size;
	}

	/* start transaction */
	value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXCTL);
	value |= DPAUX_DP_AUXCTL_TRANSACTREQ;
	tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXCTL);

	/* Completed from the IRQ handler on DPAUX_INTR_AUX_DONE. */
	status = wait_for_completion_timeout(&dpaux->complete, timeout);
	if (!status)
		return -ETIMEDOUT;

	/* read status and clear errors */
	value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXSTAT);
	tegra_dpaux_writel(dpaux, 0xf00, DPAUX_DP_AUXSTAT);

	if (value & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR)
		return -ETIMEDOUT;

	if ((value & DPAUX_DP_AUXSTAT_RX_ERROR) ||
	    (value & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR) ||
	    (value & DPAUX_DP_AUXSTAT_NO_STOP_ERROR))
		return -EIO;

	/* NOTE(review): no default case — an unexpected reply-type code
	 * would leave msg->reply untouched; confirm the HW can only
	 * report these five values. */
	switch ((value & DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK) >> 16) {
	case 0x00:
		msg->reply = DP_AUX_NATIVE_REPLY_ACK;
		break;

	case 0x01:
		msg->reply = DP_AUX_NATIVE_REPLY_NACK;
		break;

	case 0x02:
		msg->reply = DP_AUX_NATIVE_REPLY_DEFER;
		break;

	case 0x04:
		msg->reply = DP_AUX_I2C_REPLY_NACK;
		break;

	case 0x08:
		msg->reply = DP_AUX_I2C_REPLY_DEFER;
		break;
	}

	/* For ACKed reads, drain the RX FIFO into the caller's buffer. */
	if ((msg->size > 0) && (msg->reply == DP_AUX_NATIVE_REPLY_ACK)) {
		if (msg->request & DP_AUX_I2C_READ) {
			size_t count = value & DPAUX_DP_AUXSTAT_REPLY_MASK;

			if (WARN_ON(count != msg->size))
				count = min_t(size_t, count, msg->size);

			tegra_dpaux_read_fifo(dpaux, msg->buffer, count);
			ret = count;
		}
	}

	return ret;
}
/*
 * Deferred hotplug handler: forward plug/unplug interrupts to the DRM
 * core, but only once an output has been attached.
 */
static void tegra_dpaux_hotplug(struct work_struct *work)
{
	struct tegra_dpaux *dpaux = work_to_dpaux(work);

	if (!dpaux->output)
		return;

	drm_helper_hpd_irq_event(dpaux->output->connector.dev);
}
/*
 * Interrupt handler: acknowledge all pending AUX interrupts, defer
 * plug/unplug events to the hotplug work item, and complete any
 * transfer waiting on AUX_DONE.
 */
static irqreturn_t tegra_dpaux_irq(int irq, void *data)
{
	struct tegra_dpaux *dpaux = data;
	unsigned long value;

	/* read and acknowledge pending interrupts in one pass */
	value = tegra_dpaux_readl(dpaux, DPAUX_INTR_AUX);
	tegra_dpaux_writel(dpaux, value, DPAUX_INTR_AUX);

	if (value & (DPAUX_INTR_PLUG_EVENT | DPAUX_INTR_UNPLUG_EVENT))
		schedule_work(&dpaux->work);

	if (value & DPAUX_INTR_IRQ_EVENT) {
		/* TODO: handle this */
	}

	if (value & DPAUX_INTR_AUX_DONE)
		complete(&dpaux->complete);

	return IRQ_HANDLED;
}
/*
 * Probe: map registers, bring up clocks/reset, grab the vdd regulator
 * and IRQ, register the DP AUX channel and add the device to the
 * global dpaux list.
 *
 * Fix: every failure after clk_prepare_enable() used to return
 * directly, leaking the enabled clock(s) and leaving the reset
 * deasserted; errors now unwind through goto labels mirroring the
 * teardown order in tegra_dpaux_remove().
 */
static int tegra_dpaux_probe(struct platform_device *pdev)
{
	struct tegra_dpaux *dpaux;
	struct resource *regs;
	unsigned long value;
	int err;

	dpaux = devm_kzalloc(&pdev->dev, sizeof(*dpaux), GFP_KERNEL);
	if (!dpaux)
		return -ENOMEM;

	INIT_WORK(&dpaux->work, tegra_dpaux_hotplug);
	init_completion(&dpaux->complete);
	INIT_LIST_HEAD(&dpaux->list);
	dpaux->dev = &pdev->dev;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dpaux->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(dpaux->regs))
		return PTR_ERR(dpaux->regs);

	dpaux->irq = platform_get_irq(pdev, 0);
	if (dpaux->irq < 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return -ENXIO;
	}

	dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
	if (IS_ERR(dpaux->rst))
		return PTR_ERR(dpaux->rst);

	dpaux->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dpaux->clk))
		return PTR_ERR(dpaux->clk);

	err = clk_prepare_enable(dpaux->clk);
	if (err < 0)
		return err;

	reset_control_deassert(dpaux->rst);

	dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent");
	if (IS_ERR(dpaux->clk_parent)) {
		err = PTR_ERR(dpaux->clk_parent);
		goto assert_reset;
	}

	err = clk_prepare_enable(dpaux->clk_parent);
	if (err < 0)
		goto assert_reset;

	err = clk_set_rate(dpaux->clk_parent, 270000000);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set clock to 270 MHz: %d\n",
			err);
		goto disable_parent_clk;
	}

	dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(dpaux->vdd)) {
		err = PTR_ERR(dpaux->vdd);
		goto disable_parent_clk;
	}

	err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0,
			       dev_name(dpaux->dev), dpaux);
	if (err < 0) {
		dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n",
			dpaux->irq, err);
		goto disable_parent_clk;
	}

	dpaux->aux.transfer = tegra_dpaux_transfer;
	dpaux->aux.dev = &pdev->dev;

	err = drm_dp_aux_register(&dpaux->aux);
	if (err < 0)
		goto disable_parent_clk;

	/* enable and clear all interrupts */
	value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT |
		DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT;
	tegra_dpaux_writel(dpaux, value, DPAUX_INTR_EN_AUX);
	tegra_dpaux_writel(dpaux, value, DPAUX_INTR_AUX);

	mutex_lock(&dpaux_lock);
	list_add_tail(&dpaux->list, &dpaux_list);
	mutex_unlock(&dpaux_lock);

	platform_set_drvdata(pdev, dpaux);

	return 0;

disable_parent_clk:
	clk_disable_unprepare(dpaux->clk_parent);
assert_reset:
	reset_control_assert(dpaux->rst);
	clk_disable_unprepare(dpaux->clk);
	return err;
}
/* Remove: unregister the AUX channel, drop the device from the global
 * list, flush pending hotplug work, then power down in reverse probe
 * order (parent clock, reset, module clock). */
static int tegra_dpaux_remove(struct platform_device *pdev)
{
	struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);

	drm_dp_aux_unregister(&dpaux->aux);

	mutex_lock(&dpaux_lock);
	list_del(&dpaux->list);
	mutex_unlock(&dpaux_lock);

	/* No more hotplug events may run after this point. */
	cancel_work_sync(&dpaux->work);
	clk_disable_unprepare(dpaux->clk_parent);
	reset_control_assert(dpaux->rst);
	clk_disable_unprepare(dpaux->clk);
	return 0;
}
/* Device-tree match table; only the Tegra124 DPAUX block is supported. */
static const struct of_device_id tegra_dpaux_of_match[] = {
	{ .compatible = "nvidia,tegra124-dpaux", },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_dpaux_of_match);
/* Platform driver glue; registered by the tegra-drm core, hence non-static. */
struct platform_driver tegra_dpaux_driver = {
	.driver = {
		.name = "tegra-dpaux",
		.of_match_table = tegra_dpaux_of_match,
	},
	.probe = tegra_dpaux_probe,
	.remove = tegra_dpaux_remove,
};
struct tegra_dpaux *tegra_dpaux_find_by_of_node(struct device_node *np)
{
struct tegra_dpaux *dpaux;
mutex_lock(&dpaux_lock);
list_for_each_entry(dpaux, &dpaux_list, list)
if (np == dpaux->dev->of_node) {
mutex_unlock(&dpaux_lock);
return dpaux;
}
mutex_unlock(&dpaux_lock);
return NULL;
}
/*
 * tegra_dpaux_attach - bind an output to this DPAUX channel, power up the
 * panel supply and poll for up to 250 ms until the sink reports hot-plug.
 *
 * Returns 0 when the sink is detected, -ETIMEDOUT if it never shows up, or
 * a negative error from regulator_enable().
 */
int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output)
{
	unsigned long timeout;
	int err;

	output->connector.polled = DRM_CONNECTOR_POLL_HPD;
	dpaux->output = output;

	err = regulator_enable(dpaux->vdd);
	if (err < 0)
		return err;

	/* give the sink up to 250 ms to assert HPD after power-up */
	timeout = jiffies + msecs_to_jiffies(250);

	while (time_before(jiffies, timeout)) {
		enum drm_connector_status status;

		status = tegra_dpaux_detect(dpaux);
		if (status == connector_status_connected)
			return 0;

		usleep_range(1000, 2000);
	}

	return -ETIMEDOUT;
}
/*
 * tegra_dpaux_detach - counterpart of tegra_dpaux_attach(): power down the
 * panel supply and poll for up to 250 ms until HPD deasserts, then drop the
 * output binding.
 *
 * Returns 0 on success, -ETIMEDOUT if HPD stays asserted, or a negative
 * error from regulator_disable().
 */
int tegra_dpaux_detach(struct tegra_dpaux *dpaux)
{
	unsigned long timeout;
	int err;

	err = regulator_disable(dpaux->vdd);
	if (err < 0)
		return err;

	timeout = jiffies + msecs_to_jiffies(250);

	while (time_before(jiffies, timeout)) {
		enum drm_connector_status status;

		status = tegra_dpaux_detect(dpaux);
		if (status == connector_status_disconnected) {
			/* only unbind once the sink has really gone away */
			dpaux->output = NULL;
			return 0;
		}

		usleep_range(1000, 2000);
	}

	return -ETIMEDOUT;
}
/*
 * tegra_dpaux_detect - report the sink's hot-plug state from the AUXSTAT
 * register's HPD status bit.
 */
enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux)
{
	unsigned long status = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXSTAT);

	return (status & DPAUX_DP_AUXSTAT_HPD_STATUS) ?
		connector_status_connected : connector_status_disconnected;
}
/*
 * tegra_dpaux_enable - configure the hybrid pads for AUX-channel operation
 * and release the pad power-down, making AUX transfers possible.
 *
 * The PADCTL values (common-mode, drive strength/impedance, input receiver)
 * are fixed settings for AUX mode; their numeric meaning is defined by the
 * DPAUX_HYBRID_PADCTL register layout.
 */
int tegra_dpaux_enable(struct tegra_dpaux *dpaux)
{
	unsigned long value;

	value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
		DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
		DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
		DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
		DPAUX_HYBRID_PADCTL_MODE_AUX;
	tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);

	/* un-power-down the pads last, once they are configured */
	value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
	value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
	tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);

	return 0;
}
/*
 * tegra_dpaux_disable - power down the hybrid pads; inverse of
 * tegra_dpaux_enable().
 */
int tegra_dpaux_disable(struct tegra_dpaux *dpaux)
{
	unsigned long value;

	value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
	value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
	tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);

	return 0;
}
/*
 * tegra_dpaux_prepare - program the sink's main-link channel coding via
 * DPCD.  Returns 0 on success or a negative error from the AUX write.
 */
int tegra_dpaux_prepare(struct tegra_dpaux *dpaux, u8 encoding)
{
	int ret;

	ret = drm_dp_dpcd_writeb(&dpaux->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
				 encoding);
	return (ret < 0) ? ret : 0;
}
/*
 * tegra_dpaux_train - run one phase of DisplayPort link training.
 * @dpaux: the AUX channel to the sink
 * @link: negotiated link parameters (lane count is read from here)
 * @pattern: DP_TRAINING_PATTERN_SET value to program into the sink
 *
 * Writes the training pattern, programs fixed drive settings (400 mV swing,
 * no pre-emphasis, max-reached flags) on all lanes, waits, then checks the
 * link status appropriate to the pattern: clock recovery for pattern 1,
 * channel equalization for pattern 2.
 *
 * Returns 0 on success, -EAGAIN if the sink has not locked yet (caller may
 * retry with adjusted settings), -EINVAL for an unsupported pattern, or a
 * negative error from an AUX transfer.
 */
int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link,
		      u8 pattern)
{
	u8 tp = pattern & DP_TRAINING_PATTERN_MASK;
	u8 status[DP_LINK_STATUS_SIZE], values[4];
	unsigned int i;
	int err;

	err = drm_dp_dpcd_writeb(&dpaux->aux, DP_TRAINING_PATTERN_SET, pattern);
	if (err < 0)
		return err;

	/* disabling training needs no status check */
	if (tp == DP_TRAINING_PATTERN_DISABLE)
		return 0;

	/* same fixed drive settings for every active lane (at most 4 in DP) */
	for (i = 0; i < link->num_lanes; i++)
		values[i] = DP_TRAIN_MAX_PRE_EMPHASIS_REACHED |
			    DP_TRAIN_PRE_EMPHASIS_0 |
			    DP_TRAIN_MAX_SWING_REACHED |
			    DP_TRAIN_VOLTAGE_SWING_400;

	err = drm_dp_dpcd_write(&dpaux->aux, DP_TRAINING_LANE0_SET, values,
				link->num_lanes);
	if (err < 0)
		return err;

	/* give the sink time to lock before sampling link status */
	usleep_range(500, 1000);

	err = drm_dp_dpcd_read_link_status(&dpaux->aux, status);
	if (err < 0)
		return err;

	switch (tp) {
	case DP_TRAINING_PATTERN_1:
		if (!drm_dp_clock_recovery_ok(status, link->num_lanes))
			return -EAGAIN;

		break;

	case DP_TRAINING_PATTERN_2:
		if (!drm_dp_channel_eq_ok(status, link->num_lanes))
			return -EAGAIN;

		break;

	default:
		dev_err(dpaux->dev, "unsupported training pattern %u\n", tp);
		return -EINVAL;
	}

	err = drm_dp_dpcd_writeb(&dpaux->aux, DP_EDP_CONFIGURATION_SET, 0);
	if (err < 0)
		return err;

	return 0;
}
| gpl-2.0 |
TeamBliss-Devices/android_kernel_samsung_ms013g | arch/mn10300/kernel/smp.c | 350 | 28276 | /* SMP support routines.
*
* Copyright (C) 2006-2008 Panasonic Corporation
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/processor.h>
#include <asm/bug.h>
#include <asm/exceptions.h>
#include <asm/hardirq.h>
#include <asm/fpu.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>
#include <asm/cpu-regs.h>
#include <asm/intctl-regs.h>
#include "internal.h"
#ifdef CONFIG_HOTPLUG_CPU
#include <asm/cacheflush.h>
static unsigned long sleep_mode[NR_CPUS];
static void run_sleep_cpu(unsigned int cpu);
static void run_wakeup_cpu(unsigned int cpu);
#endif /* CONFIG_HOTPLUG_CPU */
/*
* Debug Message function
*/
#undef DEBUG_SMP
#ifdef DEBUG_SMP
#define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#endif
/* timeout value in msec for smp_nmi_call_function. zero is no timeout. */
#define CALL_FUNCTION_NMI_IPI_TIMEOUT 0
/*
* Structure and data for smp_nmi_call_function().
*/
/*
 * Rendezvous data for smp_nmi_call_function().  Each target CPU clears its
 * bit in @started once it has picked up @func/@info, and in @finished once
 * the function has run (only when @wait is set).  The structure is cache-
 * line aligned and padded so flushes/invalidates on it do not touch
 * neighbouring data.
 */
struct nmi_call_data_struct {
	smp_call_func_t func;		/* function to run on the target CPUs */
	void *info;			/* opaque context passed to @func */
	cpumask_t started;		/* CPUs that have yet to pick up the call */
	cpumask_t finished;		/* CPUs that have yet to complete it */
	int wait;			/* non-zero: initiator waits for @finished */
	char size_alignment[0]
		__attribute__ ((__aligned__(SMP_CACHE_BYTES)));
} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));

/* serialises use of the single shared nmi_call_data slot */
static DEFINE_SPINLOCK(smp_nmi_call_lock);
static struct nmi_call_data_struct *nmi_call_data;
/*
* Data structures and variables
*/
static cpumask_t cpu_callin_map;	/* Bitmask of callin CPUs */
static cpumask_t cpu_callout_map;	/* Bitmask of callout CPUs */
cpumask_t cpu_boot_map;			/* Bitmask of boot APs */
/* initial stack pointer for each AP, indexed by cpu_id - 1 (CPU 0 excluded) */
unsigned long start_stack[NR_CPUS - 1];

/*
 * Per CPU parameters
 */
struct mn10300_cpuinfo cpu_data[NR_CPUS] __cacheline_aligned;

static int cpucount;			/* The count of boot CPUs */
static cpumask_t smp_commenced_mask;	/* APs spin until their bit is set here */
cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
/*
* Function Prototypes
*/
static int do_boot_cpu(int);
static void smp_show_cpu_info(int cpu_id);
static void smp_callin(void);
static void smp_online(void);
static void smp_store_cpu_info(int);
static void smp_cpu_init(void);
static void smp_tune_scheduling(void);
static void send_IPI_mask(const cpumask_t *cpumask, int irq);
static void init_ipi(void);
/*
* IPI Initialization interrupt definitions
*/
static void mn10300_ipi_disable(unsigned int irq);
static void mn10300_ipi_enable(unsigned int irq);
static void mn10300_ipi_chip_disable(struct irq_data *d);
static void mn10300_ipi_chip_enable(struct irq_data *d);
static void mn10300_ipi_ack(struct irq_data *d);
static void mn10300_ipi_nop(struct irq_data *d);
/* irq_chip driving the per-CPU IPI interrupt lines */
static struct irq_chip mn10300_ipi_type = {
	.name		= "cpu_ipi",
	.irq_disable	= mn10300_ipi_chip_disable,
	.irq_enable	= mn10300_ipi_chip_enable,
	.irq_ack	= mn10300_ipi_ack,
	.irq_eoi	= mn10300_ipi_nop	/* nothing to do at EOI */
};
static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id);
static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id);
/* handler registration for the reschedule IPI */
static struct irqaction reschedule_ipi = {
	.handler	= smp_reschedule_interrupt,
	.name		= "smp reschedule IPI"
};

/* handler registration for the single-target call-function IPI */
static struct irqaction call_function_ipi = {
	.handler	= smp_call_function_interrupt,
	.name		= "smp call function IPI"
};

/* the local-timer IPI is only needed when clock events are not (fully) used */
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id);
static struct irqaction local_timer_ipi = {
	.handler	= smp_ipi_timer_interrupt,
	.flags		= IRQF_DISABLED,
	.name		= "smp local timer IPI"
};
#endif
/**
 * init_ipi - Initialise the IPI mechanism
 *
 * Registers irq_chip handlers for the maskable IPIs (reschedule, call
 * function, and optionally local timer), installs low-level stubs for the
 * cache-flush and SMP-boot IPIs, and arms the NMI call-function channel.
 * Each GxICR write is followed by a read-back to flush the write buffer.
 */
static void init_ipi(void)
{
	unsigned long flags;
	u16 tmp16;

	/* set up the reschedule IPI */
	irq_set_chip_and_handler(RESCHEDULE_IPI, &mn10300_ipi_type,
				 handle_percpu_irq);
	setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
	set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV);
	mn10300_ipi_enable(RESCHEDULE_IPI);

	/* set up the call function IPI */
	irq_set_chip_and_handler(CALL_FUNC_SINGLE_IPI, &mn10300_ipi_type,
				 handle_percpu_irq);
	setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
	set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV);
	mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);

	/* set up the local timer IPI */
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
	irq_set_chip_and_handler(LOCAL_TIMER_IPI, &mn10300_ipi_type,
				 handle_percpu_irq);
	setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
	set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV);
	mn10300_ipi_enable(LOCAL_TIMER_IPI);
#endif

#ifdef CONFIG_MN10300_CACHE_ENABLED
	/* set up the cache flush IPI; handled by a low-level stub, not the
	 * generic IRQ layer */
	flags = arch_local_cli_save();
	__set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV),
			mn10300_low_ipi_handler);
	GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(FLUSH_CACHE_IPI);
	arch_local_irq_restore(flags);
#endif

	/* set up the NMI call function IPI */
	flags = arch_local_cli_save();
	GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
	tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);	/* flush write buffer */
	arch_local_irq_restore(flags);

	/* set up the SMP boot IPI */
	flags = arch_local_cli_save();
	__set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV),
			mn10300_low_ipi_handler);
	arch_local_irq_restore(flags);
}
/**
 * mn10300_ipi_shutdown - Shut down handling of an IPI
 * @irq: The IPI to be shut down.
 *
 * Clears the enable bit while preserving the level bits and writing DETECT
 * to clear any pending request; the final read flushes the write buffer.
 */
static void mn10300_ipi_shutdown(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	tmp = GxICR(irq);
	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
	tmp = GxICR(irq);	/* flush write buffer */

	arch_local_irq_restore(flags);
}
/**
 * mn10300_ipi_enable - Enable an IPI
 * @irq: The IPI to be enabled.
 *
 * Sets the enable bit while preserving the interrupt level bits; the final
 * read flushes the write buffer.
 */
static void mn10300_ipi_enable(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	tmp = GxICR(irq);
	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
	tmp = GxICR(irq);	/* flush write buffer */

	arch_local_irq_restore(flags);
}
/* irq_chip adapter: enable the IPI named by the irq_data */
static void mn10300_ipi_chip_enable(struct irq_data *d)
{
	mn10300_ipi_enable(d->irq);
}
/**
 * mn10300_ipi_disable - Disable an IPI
 * @irq: The IPI to be disabled.
 *
 * Clears everything but the level bits (so enable and any pending request
 * are dropped); the final read flushes the write buffer.
 */
static void mn10300_ipi_disable(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	tmp = GxICR(irq);
	GxICR(irq) = tmp & GxICR_LEVEL;
	tmp = GxICR(irq);	/* flush write buffer */

	arch_local_irq_restore(flags);
}
/* irq_chip adapter: disable the IPI named by the irq_data */
static void mn10300_ipi_chip_disable(struct irq_data *d)
{
	mn10300_ipi_disable(d->irq);
}
/**
 * mn10300_ipi_ack - Acknowledge an IPI interrupt in the PIC
 * @irq: The IPI to be acknowledged.
 *
 * Clear the interrupt detection flag for the IPI on the appropriate interrupt
 * channel in the PIC.  The byte-wide write touches only the detect flag; the
 * following read flushes the write buffer.
 */
static void mn10300_ipi_ack(struct irq_data *d)
{
	unsigned int irq = d->irq;
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	GxICR_u8(irq) = GxICR_DETECT;
	tmp = GxICR(irq);	/* flush write buffer */

	arch_local_irq_restore(flags);
}
/**
 * mn10300_ipi_nop - Dummy IPI action
 * @irq: The IPI to be acted upon.
 *
 * Used as the EOI callback: nothing is required at end-of-interrupt.
 */
static void mn10300_ipi_nop(struct irq_data *d)
{
}
/**
 * send_IPI_mask - Send IPIs to all CPUs in list
 * @cpumask: The list of CPUs to target.
 * @irq: The IPI request to be sent.
 *
 * Send the specified IPI to all the CPUs in the list, not waiting for them to
 * finish before returning.  The caller is responsible for synchronisation if
 * that is needed.
 */
static void send_IPI_mask(const cpumask_t *cpumask, int irq)
{
	int i;
	u16 tmp;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpumask_test_cpu(i, cpumask)) {
			/* send IPI: raise the request via the target CPU's
			 * cross-CPU view of its interrupt control register */
			tmp = CROSS_GxICR(irq, i);
			CROSS_GxICR(irq, i) =
				tmp | GxICR_REQUEST | GxICR_DETECT;
			tmp = CROSS_GxICR(irq, i);	/* flush write buffer */
		}
	}
}
/**
 * send_IPI_self - Send an IPI to this CPU.
 * @irq: The IPI request to be sent.
 *
 * Send the specified IPI to the current CPU.
 */
void send_IPI_self(int irq)
{
	send_IPI_mask(cpumask_of(smp_processor_id()), irq);
}
/**
 * send_IPI_allbutself - Send IPIs to all the other CPUs.
 * @irq: The IPI request to be sent.
 *
 * Send the specified IPI to all CPUs in the system barring the current one,
 * not waiting for them to finish before returning.  The caller is responsible
 * for synchronisation if that is needed.
 */
void send_IPI_allbutself(int irq)
{
	cpumask_t cpumask;

	/* online mask minus ourselves */
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);
	send_IPI_mask(&cpumask, irq);
}
/* Multi-target call-function IPIs are not wired up on this platform; only
 * the single-target variant below is used.  BUG() if this is ever reached. */
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	BUG();
	/*send_IPI_mask(mask, CALL_FUNCTION_IPI);*/
}
/* Kick one CPU to run its queued single-function call. */
void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI);
}
/**
 * smp_send_reschedule - Send reschedule IPI to a CPU
 * @cpu: The CPU to target.
 */
void smp_send_reschedule(int cpu)
{
	send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI);
}
/**
 * smp_nmi_call_function - Send a call function NMI IPI to all CPUs
 * @func: The function to ask to be run.
 * @info: The context data to pass to that function.
 * @wait: If true, wait (atomically) until function is run on all CPUs.
 *
 * Send a non-maskable request to all CPUs in the system, requesting them to
 * run the specified function with the given context data, and, potentially, to
 * wait for completion of that function on all CPUs.
 *
 * The rendezvous state lives on this CPU's stack; targets clear their bits
 * in data.started / data.finished (see smp_nmi_call_function_interrupt()).
 * With CALL_FUNCTION_NMI_IPI_TIMEOUT == 0 this busy-waits indefinitely.
 *
 * Returns 0 if successful, -ETIMEDOUT if we were asked to wait, but hit the
 * timeout.
 */
int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
{
	struct nmi_call_data_struct data;
	unsigned long flags;
	unsigned int cnt;
	int cpus, ret = 0;

	cpus = num_online_cpus() - 1;
	if (cpus < 1)
		return 0;	/* no other CPUs to call */

	data.func = func;
	data.info = info;
	cpumask_copy(&data.started, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &data.started);
	data.wait = wait;
	if (wait)
		data.finished = data.started;

	spin_lock_irqsave(&smp_nmi_call_lock, flags);
	nmi_call_data = &data;
	smp_mb();	/* publish the data before raising the NMI */

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_NMI_IPI);

	/* Wait for response */
	if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
		for (cnt = 0;
		     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
			     !cpumask_empty(&data.started);
		     cnt++)
			mdelay(1);

		if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
			for (cnt = 0;
			     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
				     !cpumask_empty(&data.finished);
			     cnt++)
				mdelay(1);
		}

		if (cnt >= CALL_FUNCTION_NMI_IPI_TIMEOUT)
			ret = -ETIMEDOUT;

	} else {
		/* If timeout value is zero, wait until cpumask has been
		 * cleared */
		while (!cpumask_empty(&data.started))
			barrier();
		if (wait)
			while (!cpumask_empty(&data.finished))
				barrier();
	}

	spin_unlock_irqrestore(&smp_nmi_call_lock, flags);
	return ret;
}
/**
 * smp_jump_to_debugger - Make other CPUs enter the debugger by sending an IPI
 *
 * Send a non-maskable request to all other CPUs in the system, instructing
 * them to jump into the debugger.  The caller is responsible for checking that
 * the other CPUs responded to the instruction.
 *
 * The caller should make sure that this CPU's debugger IPI is disabled.
 */
void smp_jump_to_debugger(void)
{
	if (num_online_cpus() > 1)
		/* Send a message to all other CPUs */
		send_IPI_allbutself(DEBUGGER_NMI_IPI);
}
/**
 * stop_this_cpu - Callback to stop a CPU.
 * @unused: Callback context (ignored).
 *
 * Runs on each CPU targeted by smp_send_stop(): marks the CPU offline and
 * spins (IRQs masked) on the file-static @stopflag, which nothing in this
 * file ever sets — so in practice the CPU parks here until reset.
 */
void stop_this_cpu(void *unused)
{
	static volatile int stopflag;
	unsigned long flags;

#ifdef CONFIG_GDBSTUB
	/* In case of single stepping smp_send_stop by other CPU,
	 * clear procindebug to avoid deadlock.
	 */
	atomic_set(&procindebug[smp_processor_id()], 0);
#endif	/* CONFIG_GDBSTUB */

	flags = arch_local_cli_save();
	set_cpu_online(smp_processor_id(), false);

	while (!stopflag)
		cpu_relax();

	set_cpu_online(smp_processor_id(), true);
	arch_local_irq_restore(flags);
}
/**
 * smp_send_stop - Send a stop request to all CPUs.
 *
 * Uses the NMI channel so that CPUs with interrupts disabled still stop.
 */
void smp_send_stop(void)
{
	smp_nmi_call_function(stop_this_cpu, NULL, 0);
}
/**
 * smp_reschedule_interrupt - Reschedule IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * Hands off to the scheduler's generic IPI entry point.
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}
/**
 * smp_call_function_interrupt - Call function IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * Only the single-target call path is supported (see
 * arch_send_call_function_ipi_mask() which BUG()s).
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
{
	/* generic_smp_call_function_interrupt(); */
	generic_smp_call_function_single_interrupt();
	return IRQ_HANDLED;
}
/**
 * smp_nmi_call_function_interrupt - Non-maskable call function IPI handler
 *
 * Target-side half of smp_nmi_call_function() /
 * hotplug_cpu_nmi_call_function(): read the shared nmi_call_data slot, clear
 * our bit in ->started to acknowledge pickup, run the function, and clear
 * our bit in ->finished if the initiator is waiting for completion.
 */
void smp_nmi_call_function_interrupt(void)
{
	smp_call_func_t func = nmi_call_data->func;
	void *info = nmi_call_data->info;
	int wait = nmi_call_data->wait;

	/* Notify the initiating CPU that I've grabbed the data and am about to
	 * execute the function
	 */
	smp_mb();
	cpumask_clear_cpu(smp_processor_id(), &nmi_call_data->started);
	(*func)(info);

	if (wait) {
		smp_mb();	/* order the call's effects before the ack */
		cpumask_clear_cpu(smp_processor_id(),
				  &nmi_call_data->finished);
	}
}
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
/**
 * smp_ipi_timer_interrupt - Local timer IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * Forwards the broadcast timer tick to the local timer code.
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id)
{
	return local_timer_interrupt();
}
#endif
/*
 * smp_init_cpus - mark every configured CPU slot as possible and present;
 * this platform assumes all NR_CPUS slots are populated.
 */
void __init smp_init_cpus(void)
{
	unsigned int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		set_cpu_present(cpu, true);
		set_cpu_possible(cpu, true);
	}
}
/**
 * smp_cpu_init - Initialise AP in start_secondary.
 *
 * For this Application Processor, set up init_mm, initialise FPU and set
 * interrupt level 0-6 setting.  Also enables this CPU's local view of the
 * IPI interrupt channels and arms the NMI call-function channel.
 */
static void __init smp_cpu_init(void)
{
	unsigned long flags;
	int cpu_id = smp_processor_id();
	u16 tmp16;

	/* NOTE(review): cpu_initialized is a cpumask_t being passed to a raw
	 * bitop; relies on the mask layout starting with its bits array */
	if (test_and_set_bit(cpu_id, &cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
		for (;;)
			local_irq_enable();
	}
	printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);

	/* run the AP on the kernel's address space */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);

	enter_lazy_tlb(&init_mm, current);

	/* Force FPU initialization */
	clear_using_fpu(current);

	/* enable the local IPI channels for this CPU */
	GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);

	GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(LOCAL_TIMER_IPI);

	GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(RESCHEDULE_IPI);

#ifdef CONFIG_MN10300_CACHE_ENABLED
	GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(FLUSH_CACHE_IPI);
#endif

	/* the boot IPI has served its purpose; stop listening for it */
	mn10300_ipi_shutdown(SMP_BOOT_IRQ);

	/* Set up the non-maskable call function IPI */
	flags = arch_local_cli_save();
	GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
	tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);	/* flush write buffer */
	arch_local_irq_restore(flags);
}
/**
 * smp_prepare_cpu_init - Initialise CPU in startup_secondary
 *
 * Set interrupt level 0-6 setting and init ICR of the kernel debugger.
 * All interrupt channels start masked at the lowest priority.
 */
void smp_prepare_cpu_init(void)
{
	int loop;

	/* Set the interrupt vector registers */
	IVAR0 = EXCEP_IRQ_LEVEL0;
	IVAR1 = EXCEP_IRQ_LEVEL1;
	IVAR2 = EXCEP_IRQ_LEVEL2;
	IVAR3 = EXCEP_IRQ_LEVEL3;
	IVAR4 = EXCEP_IRQ_LEVEL4;
	IVAR5 = EXCEP_IRQ_LEVEL5;
	IVAR6 = EXCEP_IRQ_LEVEL6;

	/* Disable all interrupts and set to priority 6 (lowest) */
	for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
		GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;

#ifdef CONFIG_KERNEL_DEBUGGER
	/* initialise the kernel debugger interrupt */
	do {
		unsigned long flags;
		u16 tmp16;

		flags = arch_local_cli_save();
		GxICR(DEBUGGER_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
		tmp16 = GxICR(DEBUGGER_NMI_IPI);	/* flush write buffer */
		arch_local_irq_restore(flags);
	} while (0);
#endif
}
/**
 * start_secondary - Activate a secondary CPU (AP)
 * @unused: Thread parameter (ignored).
 *
 * C entry point for an AP: initialise local state, report in via
 * smp_callin(), then spin until the boot CPU releases us through
 * smp_commenced_mask before coming online and entering the idle loop.
 * Never returns in practice.
 */
int __init start_secondary(void *unused)
{
	smp_cpu_init();
	smp_callin();

	/* wait for __cpu_up() to give the go-ahead */
	while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
		cpu_relax();

	local_flush_tlb();
	preempt_disable();
	smp_online();

#ifdef CONFIG_GENERIC_CLOCKEVENTS
	init_clockevents();
#endif
	cpu_idle();
	return 0;
}
/**
 * smp_prepare_cpus - Boot up secondary CPUs (APs)
 * @max_cpus: Maximum number of CPUs to boot.
 *
 * Call do_boot_cpu, and boot up APs.  The boot CPU's info is recorded first;
 * if @max_cpus is 0, SMP is left disabled and no APs are started.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int phy_id;

	/* Setup boot CPU information */
	smp_store_cpu_info(0);
	smp_tune_scheduling();

	init_ipi();

	/* If SMP should be disabled, then finish */
	if (max_cpus == 0) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		goto smp_done;
	}

	/* Boot secondary CPUs (for which phy_id > 0) */
	for (phy_id = 0; phy_id < NR_CPUS; phy_id++) {
		/* Don't boot primary CPU */
		if (max_cpus <= cpucount + 1)
			continue;
		if (phy_id != 0)
			do_boot_cpu(phy_id);
		set_cpu_possible(phy_id, true);
		smp_show_cpu_info(phy_id);
	}

smp_done:
	Dprintk("Boot done.\n");
}
/**
 * smp_store_cpu_info - Save a CPU's information
 * @cpu: The CPU to save for.
 *
 * Save boot_cpu_data and jiffy for the specified CPU, then overlay the
 * CPU-local loops_per_jiffy and revision.
 */
static void __init smp_store_cpu_info(int cpu)
{
	struct mn10300_cpuinfo *ci = &cpu_data[cpu];

	*ci = boot_cpu_data;
	ci->loops_per_jiffy = loops_per_jiffy;
	ci->type = CPUREV;
}
/**
 * smp_tune_scheduling - Set time slice value
 *
 * Nothing to do here; kept as a hook for the boot sequence.
 */
static void __init smp_tune_scheduling(void)
{
}
/**
 * do_boot_cpu: Boot up one CPU
 * @phy_id: Physical ID of CPU to boot.
 *
 * Send an IPI to a secondary CPU to boot it.  The handshake is: raise the
 * boot IPI, wait up to ~100 ms for the AP to clear the request bit, set the
 * AP's bit in cpu_callout_map, then wait up to ~5 s for the AP to set its
 * bit in cpu_callin_map (done in smp_callin()).
 *
 * Returns 0 on success, 1 otherwise (with all handshake state rolled back).
 */
static int __init do_boot_cpu(int phy_id)
{
	struct task_struct *idle;
	unsigned long send_status, callin_status;
	int timeout, cpu_id;

	send_status = GxICR_REQUEST;
	callin_status = 0;
	timeout = 0;
	cpu_id = phy_id;	/* logical CPU id == physical id here */

	cpucount++;

	/* Create idle thread for this CPU */
	idle = fork_idle(cpu_id);
	if (IS_ERR(idle))
		panic("Failed fork for CPU#%d.", cpu_id);

	idle->thread.pc = (unsigned long)start_secondary;

	printk(KERN_NOTICE "Booting CPU#%d\n", cpu_id);
	start_stack[cpu_id - 1] = idle->thread.sp;

	task_thread_info(idle)->cpu = cpu_id;

	/* Send boot IPI to AP */
	send_IPI_mask(cpumask_of(phy_id), SMP_BOOT_IRQ);

	Dprintk("Waiting for send to finish...\n");

	/* Wait for AP's IPI receive in 100[ms] */
	do {
		udelay(1000);
		send_status =
			CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST;
	} while (send_status == GxICR_REQUEST && timeout++ < 100);

	Dprintk("Waiting for cpu_callin_map.\n");

	if (send_status == 0) {
		/* Allow AP to start initializing */
		cpumask_set_cpu(cpu_id, &cpu_callout_map);

		/* Wait for setting cpu_callin_map */
		timeout = 0;
		do {
			udelay(1000);
			callin_status = cpumask_test_cpu(cpu_id,
							 &cpu_callin_map);
		} while (callin_status == 0 && timeout++ < 5000);

		if (callin_status == 0)
			Dprintk("Not responding.\n");
	} else {
		printk(KERN_WARNING "IPI not delivered.\n");
	}

	if (send_status == GxICR_REQUEST || callin_status == 0) {
		/* boot failed: undo the handshake state for this CPU */
		cpumask_clear_cpu(cpu_id, &cpu_callout_map);
		cpumask_clear_cpu(cpu_id, &cpu_callin_map);
		cpumask_clear_cpu(cpu_id, &cpu_initialized);
		cpucount--;
		return 1;
	}
	return 0;
}
/**
 * smp_show_cpu_info - Show SMP CPU information
 * @cpu: The CPU of interest.
 *
 * Prints the I/O clock rate and the BogoMIPS value derived from the CPU's
 * recorded loops_per_jiffy.
 */
static void __init smp_show_cpu_info(int cpu)
{
	struct mn10300_cpuinfo *ci = &cpu_data[cpu];

	printk(KERN_INFO
	       "CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n",
	       cpu,
	       MN10300_IOCLK / 1000000,
	       (MN10300_IOCLK / 10000) % 100,
	       ci->loops_per_jiffy / (500000 / HZ),
	       (ci->loops_per_jiffy / (5000 / HZ)) % 100);
}
/**
 * smp_callin - Set cpu_callin_map of the current CPU ID
 *
 * AP-side half of the do_boot_cpu() handshake: wait (up to 2 s) for the boot
 * CPU to set our bit in cpu_callout_map, calibrate, record our CPU info, and
 * finally announce ourselves via cpu_callin_map.
 */
static void __init smp_callin(void)
{
	unsigned long timeout;
	int cpu;

	cpu = smp_processor_id();
	timeout = jiffies + (2 * HZ);

	if (cpumask_test_cpu(cpu, &cpu_callin_map)) {
		printk(KERN_ERR "CPU#%d already present.\n", cpu);
		BUG();
	}
	Dprintk("CPU#%d waiting for CALLOUT\n", cpu);

	/* Wait for AP startup 2s total */
	while (time_before(jiffies, timeout)) {
		if (cpumask_test_cpu(cpu, &cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		printk(KERN_ERR
		       "BUG: CPU#%d started up but did not get a callout!\n",
		       cpu);
		BUG();
	}

#ifdef CONFIG_CALIBRATE_DELAY
	calibrate_delay();	/* Get our bogomips */
#endif

	/* Save our processor parameters */
	smp_store_cpu_info(cpu);

	/* Allow the boot processor to continue */
	cpumask_set_cpu(cpu, &cpu_callin_map);
}
/**
 * smp_online - Set cpu_online_mask
 *
 * Final AP bring-up step: fire the CPU_STARTING notifiers, mark ourselves
 * online (under the IPI call lock) and enable local interrupts.
 */
static void __init smp_online(void)
{
	int cpu;

	cpu = smp_processor_id();

	notify_cpu_starting(cpu);

	ipi_call_lock();
	set_cpu_online(cpu, true);
	ipi_call_unlock();

	local_irq_enable();
}
/**
 * smp_cpus_done -
 * @max_cpus: Maximum CPU count.
 *
 * Do nothing; all bring-up work happens earlier in smp_prepare_cpus().
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
}
/*
 * smp_prepare_boot_cpu - Set up stuff for the boot processor.
 *
 * Set up the cpu_online_mask, cpu_callout_map and cpu_callin_map of the boot
 * processor (CPU 0): CPU 0 never goes through the boot handshake, so its
 * callout/callin bits are set directly here.
 */
void __devinit smp_prepare_boot_cpu(void)
{
	cpumask_set_cpu(0, &cpu_callout_map);
	cpumask_set_cpu(0, &cpu_callin_map);
	current_thread_info()->cpu = 0;
}
/*
 * initialize_secondary - Initialise a secondary CPU (Application Processor).
 *
 * Set SP register and jump to thread's PC address; does not return.  The
 * stack pointer and entry point were staged into the idle thread by
 * do_boot_cpu().
 */
void initialize_secondary(void)
{
	asm volatile (
		"mov	%0,sp	\n"
		"jmp	(%1)	\n"
		:
		: "a"(current->thread.sp), "a"(current->thread.pc));
}
/**
 * __cpu_up - Set smp_commenced_mask for the nominated CPU
 * @cpu: The target CPU.
 *
 * Releases the AP spinning in start_secondary() (waking it from sleep mode
 * first if it was hot-unplugged) and waits up to ~5 s for it to come online.
 * BUGs if the CPU never shows up.
 */
int __devinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int timeout;

#ifdef CONFIG_HOTPLUG_CPU
	if (num_online_cpus() == 1)
		disable_hlt();
	if (sleep_mode[cpu])
		run_wakeup_cpu(cpu);
#endif /* CONFIG_HOTPLUG_CPU */

	cpumask_set_cpu(cpu, &smp_commenced_mask);

	/* Wait 5s total for a response */
	for (timeout = 0 ; timeout < 5000 ; timeout++) {
		if (cpu_online(cpu))
			break;
		udelay(1000);
	}

	BUG_ON(!cpu_online(cpu));
	return 0;
}
/**
 * setup_profiling_timer - Set up the profiling timer
 * @multiplier - The frequency multiplier to use
 *
 * The frequency of the profiling timer can be changed by writing a multiplier
 * value into /proc/profile.  Not supported on this platform.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
/*
* CPU hotplug routines
*/
#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_PER_CPU(struct cpu, cpu_devices);

/*
 * topology_init - register a sysfs CPU device for every present CPU so that
 * hotplug control files appear under /sys/devices/system/cpu.
 *
 * Fix: the old code used "for_each_cpu(cpu)" with a single argument, but
 * the cpumask API used throughout this file defines for_each_cpu(cpu, mask)
 * with two arguments; iterate the present mask explicitly instead.
 */
static int __init topology_init(void)
{
	int cpu, ret;

	for_each_present_cpu(cpu) {
		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
		if (ret)
			printk(KERN_WARNING
			       "topology_init: register_cpu %d failed (%d)\n",
			       cpu, ret);
	}
	return 0;
}

subsys_initcall(topology_init);
/*
 * __cpu_disable - take the calling CPU out of service prior to hot-unplug.
 *
 * CPU 0 cannot be unplugged.  Pending IRQs are migrated away and the CPU is
 * removed from its active mm's CPU mask.
 *
 * Fix: mm_cpumask() already returns a struct cpumask pointer, so taking its
 * address ("&mm_cpumask(...)") was invalid; pass the pointer directly.
 */
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;

	migrate_irqs();
	cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
	return 0;
}
/*
 * __cpu_die - complete hot-unplug of @cpu by putting it into sleep mode;
 * re-enable hlt once only one CPU remains online.
 */
void __cpu_die(unsigned int cpu)
{
	run_sleep_cpu(cpu);

	if (num_online_cpus() == 1)
		enable_hlt();
}
#ifdef CONFIG_MN10300_CACHE_ENABLED
/*
 * Disable this CPU's I/D caches via CHCTR and busy-wait until the cache
 * busy flags clear, so the disable has fully taken effect.
 */
static inline void hotplug_cpu_disable_cache(void)
{
	int tmp;
	asm volatile(
		/* clear the enable bits ... */
		"	movhu	(%1),%0	\n"
		"	and	%2,%0	\n"
		"	movhu	%0,(%1)	\n"
		/* ... then spin while either cache is still busy */
		"1:	movhu	(%1),%0	\n"
		"	btst	%3,%0	\n"
		"	bne	1b	\n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(~(CHCTR_ICEN | CHCTR_DCEN)),
		  "i"(CHCTR_ICBUSY | CHCTR_DCBUSY)
		: "memory", "cc");
}
/*
 * Re-enable this CPU's I/D caches by setting the enable bits in CHCTR.
 */
static inline void hotplug_cpu_enable_cache(void)
{
	int tmp;
	asm volatile(
		"movhu	(%1),%0	\n"
		"or	%2,%0	\n"
		"movhu	%0,(%1)	\n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(CHCTR_ICEN | CHCTR_DCEN)
		: "memory", "cc");
}
/*
 * Invalidate this CPU's I/D caches by setting the invalidate bits in CHCTR.
 */
static inline void hotplug_cpu_invalidate_cache(void)
{
	int tmp;
	asm volatile (
		"movhu	(%1),%0	\n"
		"or	%2,%0	\n"
		"movhu	%0,(%1)	\n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(CHCTR_ICINV | CHCTR_DCINV)
		: "cc");
}
#else /* CONFIG_MN10300_CACHE_ENABLED */
#define hotplug_cpu_disable_cache() do {} while (0)
#define hotplug_cpu_enable_cache() do {} while (0)
#define hotplug_cpu_invalidate_cache() do {} while (0)
#endif /* CONFIG_MN10300_CACHE_ENABLED */
/**
 * hotplug_cpu_nmi_call_function - Call a function on other CPUs for hotplug
 * @cpumask: List of target CPUs.
 * @func: The function to call on those CPUs.
 * @info: The context data for the function to be called.
 * @wait: Whether to wait for the calls to complete.
 *
 * Non-maskably call a function on another CPU for hotplug purposes.
 *
 * Unlike smp_nmi_call_function(), the rendezvous data is static and is
 * explicitly flushed/invalidated around each access because a target CPU
 * may have its caches disabled during sleep transitions.
 *
 * Fix: send_IPI_mask() takes a pointer to the mask (see its declaration),
 * so pass &cpumask rather than the cpumask_t by value.
 *
 * This function must be called with maskable interrupts disabled.
 */
static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
					 smp_call_func_t func, void *info,
					 int wait)
{
	/*
	 * The address and the size of nmi_call_func_mask_data
	 * need to be aligned on L1_CACHE_BYTES.
	 */
	static struct nmi_call_data_struct nmi_call_func_mask_data
		__cacheline_aligned;
	unsigned long start, end;

	start = (unsigned long)&nmi_call_func_mask_data;
	end = start + sizeof(struct nmi_call_data_struct);

	nmi_call_func_mask_data.func = func;
	nmi_call_func_mask_data.info = info;
	nmi_call_func_mask_data.started = cpumask;
	nmi_call_func_mask_data.wait = wait;
	if (wait)
		nmi_call_func_mask_data.finished = cpumask;

	spin_lock(&smp_nmi_call_lock);
	nmi_call_data = &nmi_call_func_mask_data;
	/* make the data visible to targets that may run cache-disabled */
	mn10300_local_dcache_flush_range(start, end);
	smp_wmb();

	send_IPI_mask(&cpumask, CALL_FUNCTION_NMI_IPI);

	do {
		/* re-read the targets' acknowledgements from memory */
		mn10300_local_dcache_inv_range(start, end);
		barrier();
	} while (!cpumask_empty(&nmi_call_func_mask_data.started));

	if (wait) {
		do {
			mn10300_local_dcache_inv_range(start, end);
			barrier();
		} while (!cpumask_empty(&nmi_call_func_mask_data.finished));
	}

	spin_unlock(&smp_nmi_call_lock);
	return 0;
}
/*
 * Runs on a CPU returning from sleep mode: re-announce via cpu_callin_map,
 * flush stale TLB entries and mark the CPU online again.
 */
static void restart_wakeup_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	cpumask_set_cpu(cpu, &cpu_callin_map);
	local_flush_tlb();
	set_cpu_online(cpu, true);
	smp_wmb();	/* publish the online state before proceeding */
}
/*
 * NMI callback run on the CPU about to be put to sleep: flag sleep mode,
 * then flush and disable its caches so memory stays coherent while it is
 * halted.
 */
static void prepare_sleep_cpu(void *unused)
{
	sleep_mode[smp_processor_id()] = 1;
	smp_mb();	/* make the flag visible before caches go away */
	mn10300_local_dcache_flush_inv();
	hotplug_cpu_disable_cache();
	hotplug_cpu_invalidate_cache();
}
/* when this function called, IE=0, NMID=0. */
/*
 * NMI callback that actually halts the CPU: sleep repeatedly until
 * wakeup_cpu() clears our sleep_mode flag, then rejoin the system.
 */
static void sleep_cpu(void *unused)
{
	unsigned int cpu_id = smp_processor_id();
	/*
	 * CALL_FUNCTION_NMI_IPI for wakeup_cpu() shall not be requested,
	 * before this cpu goes in SLEEP mode.
	 */
	do {
		smp_mb();
		__sleep_cpu();
	} while (sleep_mode[cpu_id]);
	restart_wakeup_cpu();
}
/*
 * run_sleep_cpu - put @cpu to sleep for hot-unplug: first have it flush and
 * disable its caches (waited), then issue the non-waited sleep call and give
 * it a moment to actually halt.
 *
 * Fix: cpumask_of() already yields a pointer to the CPU's mask (it is passed
 * directly to send_IPI_mask() elsewhere in this file), so taking its address
 * with "&cpumask_of(cpu)" was invalid; pass it straight to cpumask_copy().
 */
static void run_sleep_cpu(unsigned int cpu)
{
	unsigned long flags;
	cpumask_t cpumask;

	cpumask_copy(&cpumask, cpumask_of(cpu));
	flags = arch_local_cli_save();
	hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
	hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
	udelay(1);		/* delay for the cpu to sleep. */
	arch_local_irq_restore(flags);
}
/*
 * NMI callback run on a sleeping CPU to wake it: restore its caches, then
 * clear the sleep_mode flag that sleep_cpu() is polling.
 */
static void wakeup_cpu(void)
{
	hotplug_cpu_invalidate_cache();
	hotplug_cpu_enable_cache();
	smp_mb();	/* caches back up before releasing the sleep loop */
	sleep_mode[smp_processor_id()] = 0;
}
static void run_wakeup_cpu(unsigned int cpu)
{
unsigned long flags;
flags = arch_local_cli_save();
#if NR_CPUS == 2
mn10300_local_dcache_flush_inv();
#else
/*
* Before waking up the cpu,
* all online cpus should stop and flush D-Cache for global data.
*/
#error not support NR_CPUS > 2, when CONFIG_HOTPLUG_CPU=y.
#endif
hotplug_cpu_nmi_call_function(cpumask_of(cpu), wakeup_cpu, NULL, 1);
arch_local_irq_restore(flags);
}
#endif /* CONFIG_HOTPLUG_CPU */
| gpl-2.0 |
popazerty/linux-sh4-2.6.32.y | drivers/ide/ide-timings.c | 606 | 6650 | /*
* Copyright (c) 1999-2001 Vojtech Pavlik
* Copyright (c) 2007-2008 Bartlomiej Zolnierkiewicz
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
#include <linux/ide.h>
#include <linux/module.h>
/*
* PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
* These were taken from ATA/ATAPI-6 standard, rev 0a, except
* for PIO 5, which is a nonstandard extension and UDMA6, which
* is currently supported only by Maxtor drives.
*/
static struct ide_timing ide_timing[] = {
        /* Columns (per struct ide_timing, all values in nanoseconds):
         *   mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma */
        { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
        { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
        { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
        { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
        { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
        { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
        { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
        { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
        { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
        { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
        { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
        { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
        { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
        { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
        { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
        { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
        { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
        { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
        { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
        { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
        { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
        { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
        { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 },
        /* sentinel mode: terminates lookups in ide_timing_find_mode() */
        { 0xff }
};
/*
 * ide_timing_find_mode - look up the timing entry for a transfer mode
 * @speed: transfer mode (XFER_* value)
 *
 * Returns a pointer into the ide_timing[] table, or NULL when @speed has
 * no entry (the table is terminated by a sentinel mode of 0xff).
 */
struct ide_timing *ide_timing_find_mode(u8 speed)
{
        struct ide_timing *t = ide_timing;

        while (t->mode != speed) {
                if (t->mode == 0xff)    /* reached the sentinel: not found */
                        return NULL;
                t++;
        }

        return t;
}
EXPORT_SYMBOL_GPL(ide_timing_find_mode);
/*
 * ide_pio_cycle_time - determine the PIO cycle time for a drive
 * @drive: target drive
 * @pio: PIO mode number (timing of XFER_PIO_0 + @pio is used as baseline)
 *
 * Prefer the cycle time an EIDE drive reports in its identify data; fall
 * back to the standard timing table otherwise.
 *
 * Returns the cycle time in nanoseconds, or 0 when @pio maps to no known
 * transfer mode.
 */
u16 ide_pio_cycle_time(ide_drive_t *drive, u8 pio)
{
        u16 *id = drive->id;
        struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
        u16 cycle = 0;

        /* Fix: ide_timing_find_mode() returns NULL for an unknown mode;
         * the original dereferenced t unconditionally below. */
        if (t == NULL)
                return 0;

        if (id[ATA_ID_FIELD_VALID] & 2) {       /* EIDE drive */
                if (ata_id_has_iordy(drive->id))
                        cycle = id[ATA_ID_EIDE_PIO_IORDY];
                else
                        cycle = id[ATA_ID_EIDE_PIO];

                /* conservative "downgrade" for all pre-ATA2 drives */
                if (pio < 3 && cycle < t->cycle)
                        cycle = 0; /* use standard timing */

                /* Use the standard timing for the CF specific modes too */
                if (pio > 4 && ata_id_is_cfa(id))
                        cycle = 0;
        }

        return cycle ? cycle : t->cycle;
}
EXPORT_SYMBOL_GPL(ide_pio_cycle_time);
/* ENOUGH(): smallest count of 'unit'-sized periods covering 'v'
 * (ceiling division).  EZ(): same, but a zero input ("not specified")
 * stays zero instead of quantizing to one. */
#define ENOUGH(v, unit) (((v) - 1) / (unit) + 1)
#define EZ(v, unit) ((v) ? ENOUGH(v, unit) : 0)
/*
 * ide_timing_quantize - convert a timing record to clock-cycle counts
 * @t: input timing (nanoseconds, per the ide_timing[] table)
 * @q: output timing (cycle counts, rounded up via EZ/ENOUGH)
 * @T: bus clock period for PIO/MWDMA fields (in the unit of ns * 1000,
 *     i.e. picoseconds, given the multiplication below)
 * @UT: clock period used for the udma field, same unit as @T
 *
 * Fields that are zero ("not specified") remain zero.  @t and @q may
 * alias (ide_timing_compute() calls this with t == q).
 */
static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q,
                                int T, int UT)
{
        q->setup = EZ(t->setup * 1000, T);
        q->act8b = EZ(t->act8b * 1000, T);
        q->rec8b = EZ(t->rec8b * 1000, T);
        q->cyc8b = EZ(t->cyc8b * 1000, T);
        q->active = EZ(t->active * 1000, T);
        q->recover = EZ(t->recover * 1000, T);
        q->cycle = EZ(t->cycle * 1000, T);
        q->udma = EZ(t->udma * 1000, UT);
}
/*
 * ide_timing_merge - merge two timing records field by field
 * @a: first input timing
 * @b: second input timing
 * @m: result (may alias @a or @b)
 * @what: IDE_TIMING_* bitmask selecting which fields to merge
 *
 * For each selected field the slower (larger) of the two values wins.
 */
void ide_timing_merge(struct ide_timing *a, struct ide_timing *b,
                      struct ide_timing *m, unsigned int what)
{
#define IDE_TIMING_MERGE_FIELD(field, flag)                     \
        do {                                                    \
                if (what & (flag))                              \
                        m->field = max(a->field, b->field);     \
        } while (0)

        IDE_TIMING_MERGE_FIELD(setup,   IDE_TIMING_SETUP);
        IDE_TIMING_MERGE_FIELD(act8b,   IDE_TIMING_ACT8B);
        IDE_TIMING_MERGE_FIELD(rec8b,   IDE_TIMING_REC8B);
        IDE_TIMING_MERGE_FIELD(cyc8b,   IDE_TIMING_CYC8B);
        IDE_TIMING_MERGE_FIELD(active,  IDE_TIMING_ACTIVE);
        IDE_TIMING_MERGE_FIELD(recover, IDE_TIMING_RECOVER);
        IDE_TIMING_MERGE_FIELD(cycle,   IDE_TIMING_CYCLE);
        IDE_TIMING_MERGE_FIELD(udma,    IDE_TIMING_UDMA);

#undef IDE_TIMING_MERGE_FIELD
}
EXPORT_SYMBOL_GPL(ide_timing_merge);
/*
 * ide_timing_compute - compute final bus timings for a transfer mode
 * @drive: target drive
 * @speed: transfer mode (XFER_* value)
 * @t: output timing, in bus clock counts
 * @T: bus clock period (see ide_timing_quantize())
 * @UT: UDMA clock period
 *
 * Starts from the standard table entry, widens cycle times with whatever
 * an EIDE drive reports in its identify data, quantizes to clock counts,
 * then (for DMA modes) merges in the best PIO timing so that PIO-based
 * commands (IDENTIFY, SMART, ...) remain safe.  Finally stretches
 * active/recovery so they fill the full cycle.
 *
 * Returns 0 on success or -EINVAL if @speed is unknown.
 */
int ide_timing_compute(ide_drive_t *drive, u8 speed,
                       struct ide_timing *t, int T, int UT)
{
        u16 *id = drive->id;
        struct ide_timing *s, p;
        /*
         * Find the mode.
         */
        s = ide_timing_find_mode(speed);
        if (s == NULL)
                return -EINVAL;
        /*
         * Copy the timing from the table.
         */
        *t = *s;
        /*
         * If the drive is an EIDE drive, it can tell us it needs extended
         * PIO/MWDMA cycle timing.
         */
        if (id[ATA_ID_FIELD_VALID] & 2) {       /* EIDE drive */
                memset(&p, 0, sizeof(p));
                if (speed <= XFER_PIO_2)
                        p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
                else if ((speed <= XFER_PIO_4) ||
                         (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
                        p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
                else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
                        p.cycle = id[ATA_ID_EIDE_DMA_MIN];
                /* only the cycle fields can be widened by drive data */
                ide_timing_merge(&p, t, t, IDE_TIMING_CYCLE | IDE_TIMING_CYC8B);
        }
        /*
         * Convert the timing to bus clock counts.
         */
        ide_timing_quantize(t, t, T, UT);
        /*
         * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
         * S.M.A.R.T and some other commands. We have to ensure that the
         * DMA cycle timing is slower/equal than the fastest PIO timing.
         */
        if (speed >= XFER_SW_DMA_0) {
                u8 pio = ide_get_best_pio_mode(drive, 255, 5);
                /* recursion terminates: XFER_PIO_0 + pio < XFER_SW_DMA_0 */
                ide_timing_compute(drive, XFER_PIO_0 + pio, &p, T, UT);
                ide_timing_merge(&p, t, t, IDE_TIMING_ALL);
        }
        /*
         * Lengthen active & recovery time so that cycle time is correct.
         */
        if (t->act8b + t->rec8b < t->cyc8b) {
                t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
                t->rec8b = t->cyc8b - t->act8b;
        }
        if (t->active + t->recover < t->cycle) {
                t->active += (t->cycle - (t->active + t->recover)) / 2;
                t->recover = t->cycle - t->active;
        }
        return 0;
}
| gpl-2.0 |
royale1223/omap-kernel | drivers/scsi/fcoe/fcoe.c | 606 | 68625 | /*
* Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* Maintained at www.Open-FCoE.org
*/
#include <linux/module.h>
#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <net/rtnetlink.h>
#include <scsi/fc/fc_encaps.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include <scsi/libfcoe.h>
#include "fcoe.h"
MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("FCoE");
MODULE_LICENSE("GPL v2");

/* Performance tuning parameters for fcoe */
static unsigned int fcoe_ddp_min;
module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
                 "Direct Data Placement (DDP).");

/* serializes FCoE configuration (create/destroy/enable/disable) */
DEFINE_MUTEX(fcoe_config_mutex);

static struct workqueue_struct *fcoe_wq;

/* fcoe_percpu_clean completion. Waiter protected by fcoe_create_mutex */
static DECLARE_COMPLETION(fcoe_flush_completion);

/* fcoe host list */
/* must only be accessed under the RTNL mutex */
LIST_HEAD(fcoe_hostlist);

/* per-CPU receive state for the fcoe receive threads */
DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
/* Function Prototypes */
static int fcoe_reset(struct Scsi_Host *);
static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
static int fcoe_rcv(struct sk_buff *, struct net_device *,
                    struct packet_type *, struct net_device *);
static int fcoe_percpu_receive_thread(void *);
static void fcoe_percpu_clean(struct fc_lport *);
static int fcoe_link_speed_update(struct fc_lport *);
static int fcoe_link_ok(struct fc_lport *);

/* host-list helpers (RTNL-protected list, see fcoe_hostlist above) */
static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
static int fcoe_hostlist_add(const struct fc_lport *);

static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
static void fcoe_dev_cleanup(void);
static struct fcoe_interface
*fcoe_hostlist_lookup_port(const struct net_device *);

/* FIP (FCoE Initialization Protocol) handlers */
static int fcoe_fip_recv(struct sk_buff *, struct net_device *,
                         struct packet_type *, struct net_device *);
static void fcoe_fip_send(struct fcoe_ctlr *, struct sk_buff *);
static void fcoe_update_src_mac(struct fc_lport *, u8 *);
static u8 *fcoe_get_src_mac(struct fc_lport *);
static void fcoe_destroy_work(struct work_struct *);

/* DDP offload pass-throughs to the LLD */
static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *,
                          unsigned int);
static int fcoe_ddp_done(struct fc_lport *, u16);

static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);

/* fcoe transport create/destroy/enable/disable entry points */
static bool fcoe_match(struct net_device *netdev);
static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
static int fcoe_destroy(struct net_device *netdev);
static int fcoe_enable(struct net_device *netdev);
static int fcoe_disable(struct net_device *netdev);

static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
                                      u32 did, struct fc_frame *,
                                      unsigned int op,
                                      void (*resp)(struct fc_seq *,
                                                   struct fc_frame *,
                                                   void *),
                                      void *, u32 timeout);
static void fcoe_recv_frame(struct sk_buff *skb);
static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);

/* notification function for packets from net device */
static struct notifier_block fcoe_notifier = {
        .notifier_call = fcoe_device_notification,
};

/* notification function for CPU hotplug events */
static struct notifier_block fcoe_cpu_notifier = {
        .notifier_call = fcoe_cpu_callback,
};

static struct scsi_transport_template *fcoe_nport_scsi_transport;
static struct scsi_transport_template *fcoe_vport_scsi_transport;

/* NPIV vport operations */
static int fcoe_vport_destroy(struct fc_vport *);
static int fcoe_vport_create(struct fc_vport *, bool disabled);
static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
static int fcoe_validate_vport_create(struct fc_vport *);
/* libfc callbacks implemented by this transport */
static struct libfc_function_template fcoe_libfc_fcn_templ = {
        .frame_send = fcoe_xmit,
        .ddp_setup = fcoe_ddp_setup,
        .ddp_done = fcoe_ddp_done,
        .elsct_send = fcoe_elsct_send,
        .get_lesb = fcoe_get_lesb,
        .lport_set_port_id = fcoe_set_port_id,
};

/* FC transport attributes for physical N_Ports; includes the NPIV
 * vport management callbacks */
struct fc_function_template fcoe_nport_fc_functions = {
        .show_host_node_name = 1,
        .show_host_port_name = 1,
        .show_host_supported_classes = 1,
        .show_host_supported_fc4s = 1,
        .show_host_active_fc4s = 1,
        .show_host_maxframe_size = 1,
        .show_host_port_id = 1,
        .show_host_supported_speeds = 1,
        .get_host_speed = fc_get_host_speed,
        .show_host_speed = 1,
        .show_host_port_type = 1,
        .get_host_port_state = fc_get_host_port_state,
        .show_host_port_state = 1,
        .show_host_symbolic_name = 1,
        .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
        .show_rport_maxframe_size = 1,
        .show_rport_supported_classes = 1,
        .show_host_fabric_name = 1,
        .show_starget_node_name = 1,
        .show_starget_port_name = 1,
        .show_starget_port_id = 1,
        .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
        .show_rport_dev_loss_tmo = 1,
        .get_fc_host_stats = fc_get_host_stats,
        .issue_fc_host_lip = fcoe_reset,
        .terminate_rport_io = fc_rport_terminate_io,
        .vport_create = fcoe_vport_create,
        .vport_delete = fcoe_vport_destroy,
        .vport_disable = fcoe_vport_disable,
        .set_vport_symbolic_name = fcoe_set_vport_symbolic_name,
        .bsg_request = fc_lport_bsg_request,
};

/* FC transport attributes for NPIV VN_Ports; same as above but without
 * the vport_* callbacks (vports cannot create further vports) */
struct fc_function_template fcoe_vport_fc_functions = {
        .show_host_node_name = 1,
        .show_host_port_name = 1,
        .show_host_supported_classes = 1,
        .show_host_supported_fc4s = 1,
        .show_host_active_fc4s = 1,
        .show_host_maxframe_size = 1,
        .show_host_port_id = 1,
        .show_host_supported_speeds = 1,
        .get_host_speed = fc_get_host_speed,
        .show_host_speed = 1,
        .show_host_port_type = 1,
        .get_host_port_state = fc_get_host_port_state,
        .show_host_port_state = 1,
        .show_host_symbolic_name = 1,
        .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
        .show_rport_maxframe_size = 1,
        .show_rport_supported_classes = 1,
        .show_host_fabric_name = 1,
        .show_starget_node_name = 1,
        .show_starget_port_name = 1,
        .show_starget_port_id = 1,
        .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
        .show_rport_dev_loss_tmo = 1,
        .get_fc_host_stats = fc_get_host_stats,
        .issue_fc_host_lip = fcoe_reset,
        .terminate_rport_io = fc_rport_terminate_io,
        .bsg_request = fc_lport_bsg_request,
};

/* SCSI midlayer host template shared by all FCoE instances */
static struct scsi_host_template fcoe_shost_template = {
        .module = THIS_MODULE,
        .name = "FCoE Driver",
        .proc_name = FCOE_NAME,
        .queuecommand = fc_queuecommand,
        .eh_abort_handler = fc_eh_abort,
        .eh_device_reset_handler = fc_eh_device_reset,
        .eh_host_reset_handler = fc_eh_host_reset,
        .slave_alloc = fc_slave_alloc,
        .change_queue_depth = fc_change_queue_depth,
        .change_queue_type = fc_change_queue_type,
        .this_id = -1,
        .cmd_per_lun = 3,
        .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
        .use_clustering = ENABLE_CLUSTERING,
        .sg_tablesize = SG_ALL,
        .max_sectors = 0xffff,
};
/**
* fcoe_interface_setup() - Setup a FCoE interface
* @fcoe: The new FCoE interface
* @netdev: The net device that the fcoe interface is on
*
* Returns : 0 for success
* Locking: must be called with the RTNL mutex held
*/
static int fcoe_interface_setup(struct fcoe_interface *fcoe,
                                struct net_device *netdev)
{
        struct fcoe_ctlr *fip = &fcoe->ctlr;
        struct netdev_hw_addr *ha;
        struct net_device *real_dev;
        u8 flogi_maddr[ETH_ALEN];
        const struct net_device_ops *ops;

        fcoe->netdev = netdev;

        /* Let LLD initialize for FCoE; failure is logged but not fatal */
        ops = netdev->netdev_ops;
        if (ops->ndo_fcoe_enable) {
                if (ops->ndo_fcoe_enable(netdev))
                        FCOE_NETDEV_DBG(netdev, "Failed to enable FCoE"
                                        " specific feature for LLD.\n");
        }

        /* Do not support for bonding device */
        if (netdev->priv_flags & IFF_BONDING && netdev->flags & IFF_MASTER) {
                FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n");
                return -EOPNOTSUPP;
        }

        /* look for SAN MAC address, if multiple SAN MACs exist, only
         * use the first one for SPMA */
        real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ?
                vlan_dev_real_dev(netdev) : netdev;
        rcu_read_lock();
        for_each_dev_addr(real_dev, ha) {
                if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
                    (is_valid_ether_addr(ha->addr))) {
                        memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
                        fip->spma = 1;
                        break;
                }
        }
        rcu_read_unlock();

        /* setup Source Mac Address: fall back to the netdev address when
         * no SAN MAC was found (FPMA mode) */
        if (!fip->spma)
                memcpy(fip->ctl_src_addr, netdev->dev_addr, netdev->addr_len);

        /*
         * Add FCoE MAC address as second unicast MAC address
         * or enter promiscuous mode if not capable of listening
         * for multiple unicast MACs.
         */
        memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
        dev_uc_add(netdev, flogi_maddr);
        if (fip->spma)
                dev_uc_add(netdev, fip->ctl_src_addr);
        if (fip->mode == FIP_MODE_VN2VN) {
                dev_mc_add(netdev, FIP_ALL_VN2VN_MACS);
                dev_mc_add(netdev, FIP_ALL_P2P_MACS);
        } else
                dev_mc_add(netdev, FIP_ALL_ENODE_MACS);

        /*
         * setup the receive function from ethernet driver
         * on the ethertype for the given device
         */
        fcoe->fcoe_packet_type.func = fcoe_rcv;
        fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
        fcoe->fcoe_packet_type.dev = netdev;
        dev_add_pack(&fcoe->fcoe_packet_type);

        /* likewise for FIP control frames */
        fcoe->fip_packet_type.func = fcoe_fip_recv;
        fcoe->fip_packet_type.type = htons(ETH_P_FIP);
        fcoe->fip_packet_type.dev = netdev;
        dev_add_pack(&fcoe->fip_packet_type);

        return 0;
}
/**
* fcoe_interface_create() - Create a FCoE interface on a net device
* @netdev: The net device to create the FCoE interface on
* @fip_mode: The mode to use for FIP
*
* Returns: pointer to a struct fcoe_interface or NULL on error
*/
static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
                                                    enum fip_state fip_mode)
{
        struct fcoe_interface *fcoe;
        int err;

        /* pin the module for the lifetime of the interface;
         * released in fcoe_interface_release() or on the error paths */
        if (!try_module_get(THIS_MODULE)) {
                FCOE_NETDEV_DBG(netdev,
                                "Could not get a reference to the module\n");
                fcoe = ERR_PTR(-EBUSY);
                goto out;
        }

        fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
        if (!fcoe) {
                FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
                fcoe = ERR_PTR(-ENOMEM);
                goto out_nomod;
        }

        /* hold the net device as well; dropped in fcoe_interface_release() */
        dev_hold(netdev);
        kref_init(&fcoe->kref);

        /*
         * Initialize FIP.
         */
        fcoe_ctlr_init(&fcoe->ctlr, fip_mode);
        fcoe->ctlr.send = fcoe_fip_send;
        fcoe->ctlr.update_mac = fcoe_update_src_mac;
        fcoe->ctlr.get_src_addr = fcoe_get_src_mac;

        err = fcoe_interface_setup(fcoe, netdev);
        if (err) {
                /* unwind everything acquired above */
                fcoe_ctlr_destroy(&fcoe->ctlr);
                kfree(fcoe);
                dev_put(netdev);
                fcoe = ERR_PTR(err);
                goto out_nomod;
        }

        goto out;

out_nomod:
        module_put(THIS_MODULE);
out:
        return fcoe;
}
/**
* fcoe_interface_release() - fcoe_port kref release function
* @kref: Embedded reference count in an fcoe_interface struct
*/
/**
 * fcoe_interface_release() - fcoe_interface kref release function
 * @kref: Embedded reference count in an fcoe_interface struct
 *
 * Tears down the FIP controller, frees the interface and drops the
 * net device and module references taken in fcoe_interface_create().
 */
static void fcoe_interface_release(struct kref *kref)
{
        struct fcoe_interface *fcoe =
                container_of(kref, struct fcoe_interface, kref);
        /* save the netdev pointer before the container is freed */
        struct net_device *netdev = fcoe->netdev;

        /* tear-down the FCoE controller */
        fcoe_ctlr_destroy(&fcoe->ctlr);
        kfree(fcoe);

        dev_put(netdev);
        module_put(THIS_MODULE);
}
/**
* fcoe_interface_get() - Get a reference to a FCoE interface
* @fcoe: The FCoE interface to be held
*/
/**
 * fcoe_interface_get() - Get a reference to a FCoE interface
 * @fcoe: The FCoE interface to be held
 *
 * Pairs with fcoe_interface_put().
 */
static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
{
        kref_get(&fcoe->kref);
}
/**
* fcoe_interface_put() - Put a reference to a FCoE interface
* @fcoe: The FCoE interface to be released
*/
/**
 * fcoe_interface_put() - Put a reference to a FCoE interface
 * @fcoe: The FCoE interface to be released
 *
 * When the last reference is dropped, fcoe_interface_release() frees
 * the interface.
 */
static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
{
        kref_put(&fcoe->kref, fcoe_interface_release);
}
/**
* fcoe_interface_cleanup() - Clean up a FCoE interface
* @fcoe: The FCoE interface to be cleaned up
*
* Caller must be holding the RTNL mutex
*/
void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
{
        struct net_device *netdev = fcoe->netdev;
        struct fcoe_ctlr *fip = &fcoe->ctlr;
        u8 flogi_maddr[ETH_ALEN];
        const struct net_device_ops *ops;
        struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);

        FCOE_NETDEV_DBG(netdev, "Destroying interface\n");

        /* Logout of the fabric */
        fc_fabric_logoff(fcoe->ctlr.lp);

        /* Cleanup the fc_lport */
        fc_lport_destroy(fcoe->ctlr.lp);

        /* Stop the transmit retry timer */
        del_timer_sync(&port->timer);

        /* Free existing transmit skbs */
        fcoe_clean_pending_queue(fcoe->ctlr.lp);

        /*
         * Don't listen for Ethernet packets anymore.
         * synchronize_net() ensures that the packet handlers are not running
         * on another CPU. dev_remove_pack() would do that, this calls the
         * unsynchronized version __dev_remove_pack() to avoid multiple delays.
         */
        __dev_remove_pack(&fcoe->fcoe_packet_type);
        __dev_remove_pack(&fcoe->fip_packet_type);
        synchronize_net();

        /* Delete secondary MAC addresses (mirrors fcoe_interface_setup()) */
        memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
        dev_uc_del(netdev, flogi_maddr);
        if (fip->spma)
                dev_uc_del(netdev, fip->ctl_src_addr);
        if (fip->mode == FIP_MODE_VN2VN) {
                dev_mc_del(netdev, FIP_ALL_VN2VN_MACS);
                dev_mc_del(netdev, FIP_ALL_P2P_MACS);
        } else
                dev_mc_del(netdev, FIP_ALL_ENODE_MACS);

        if (!is_zero_ether_addr(port->data_src_addr))
                dev_uc_del(netdev, port->data_src_addr);

        /* Tell the LLD we are done w/ FCoE */
        ops = netdev->netdev_ops;
        if (ops->ndo_fcoe_disable) {
                if (ops->ndo_fcoe_disable(netdev))
                        FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
                                        " specific feature for LLD.\n");
        }

        /* drop the reference owned by this interface's lifetime */
        fcoe_interface_put(fcoe);
}
/**
* fcoe_fip_recv() - Handler for received FIP frames
* @skb: The receive skb
* @netdev: The associated net device
* @ptype: The packet_type structure which was used to register this handler
 * @orig_dev: The original net_device the skb was received on.
* (in case dev is a bond)
*
* Returns: 0 for success
*/
/**
 * fcoe_fip_recv() - Handler for received FIP frames
 * @skb: The receive skb
 * @netdev: The associated net device
 * @ptype: The packet_type structure which was used to register this handler
 * @orig_dev: The original net_device the skb was received on
 *
 * Hands the frame to the FIP controller owning @ptype.
 *
 * Returns: 0 for success
 */
static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
                         struct packet_type *ptype,
                         struct net_device *orig_dev)
{
        struct fcoe_interface *fcoe =
                container_of(ptype, struct fcoe_interface, fip_packet_type);

        fcoe_ctlr_recv(&fcoe->ctlr, skb);
        return 0;
}
/**
* fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame
* @fip: The FCoE controller
* @skb: The FIP packet to be sent
*/
static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
skb->dev = fcoe_from_ctlr(fip)->netdev;
dev_queue_xmit(skb);
}
/**
* fcoe_update_src_mac() - Update the Ethernet MAC filters
* @lport: The local port to update the source MAC on
* @addr: Unicast MAC address to add
*
* Remove any previously-set unicast MAC filter.
* Add secondary FCoE MAC address filter for our OUI.
*/
static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
{
        struct fcoe_port *port = lport_priv(lport);
        struct fcoe_interface *fcoe = port->priv;

        /* address filter changes require the RTNL lock */
        rtnl_lock();
        /* drop the previously programmed filter, if any */
        if (!is_zero_ether_addr(port->data_src_addr))
                dev_uc_del(fcoe->netdev, port->data_src_addr);
        if (!is_zero_ether_addr(addr))
                dev_uc_add(fcoe->netdev, addr);
        /* remember the active address so it can be removed later */
        memcpy(port->data_src_addr, addr, ETH_ALEN);
        rtnl_unlock();
}
/**
* fcoe_get_src_mac() - return the Ethernet source address for an lport
* @lport: libfc lport
*/
/**
 * fcoe_get_src_mac() - return the Ethernet source address for an lport
 * @lport: libfc lport
 *
 * Returns the data-path source MAC last set by fcoe_update_src_mac().
 */
static u8 *fcoe_get_src_mac(struct fc_lport *lport)
{
        struct fcoe_port *port = lport_priv(lport);
        return port->data_src_addr;
}
/**
* fcoe_lport_config() - Set up a local port
* @lport: The local port to be setup
*
* Returns: 0 for success
*/
static int fcoe_lport_config(struct fc_lport *lport)
{
        /* start with the link down and no queue-full condition */
        lport->link_up = 0;
        lport->qfull = 0;
        lport->max_retry_count = 3;
        lport->max_rport_retry_count = 3;
        lport->e_d_tov = 2 * 1000; /* FC-FS default */
        lport->r_a_tov = 2 * 2 * 1000; /* 2 * E_D_TOV */
        lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
                                 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
        lport->does_npiv = 1;

        fc_lport_init_stats(lport);

        /* lport fc_lport related configuration */
        fc_lport_config(lport);

        /* offload related configuration: all off until
         * fcoe_netdev_features_change() discovers LLD capabilities */
        lport->crc_offload = 0;
        lport->seq_offload = 0;
        lport->lro_enabled = 0;
        lport->lro_xid = 0;
        lport->lso_max = 0;

        return 0;
}
/**
* fcoe_get_wwn() - Get the world wide name from LLD if it supports it
* @netdev: the associated net device
* @wwn: the output WWN
* @type: the type of WWN (WWPN or WWNN)
*
* Returns: 0 for success
*/
/**
 * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
 * @netdev: the associated net device
 * @wwn: the output WWN
 * @type: the type of WWN (WWPN or WWNN)
 *
 * Returns: 0 for success, -EINVAL when the LLD has no ndo_fcoe_get_wwn
 */
static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
        const struct net_device_ops *ops = netdev->netdev_ops;

        if (!ops->ndo_fcoe_get_wwn)
                return -EINVAL;

        return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
}
/**
* fcoe_netdev_features_change - Updates the lport's offload flags based
* on the LLD netdev's FCoE feature flags
*/
/**
 * fcoe_netdev_features_change - Updates the lport's offload flags based
 * on the LLD netdev's FCoE feature flags
 * @lport: the local port to update
 * @netdev: the net device whose feature flags are inspected
 */
static void fcoe_netdev_features_change(struct fc_lport *lport,
                                        struct net_device *netdev)
{
        mutex_lock(&lport->lp_mutex);

        /* scatter/gather */
        lport->sg_supp = (netdev->features & NETIF_F_SG) ? 1 : 0;

        /* FC CRC offload */
        lport->crc_offload = 0;
        if (netdev->features & NETIF_F_FCOE_CRC) {
                lport->crc_offload = 1;
                FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
        }

        /* large send / sequence offload */
        lport->seq_offload = 0;
        lport->lso_max = 0;
        if (netdev->features & NETIF_F_FSO) {
                lport->seq_offload = 1;
                lport->lso_max = netdev->gso_max_size;
                FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
                                lport->lso_max);
        }

        /* DDP / large receive offload, keyed on the LLD's xid limit */
        lport->lro_enabled = 0;
        lport->lro_xid = 0;
        if (netdev->fcoe_ddp_xid) {
                lport->lro_enabled = 1;
                lport->lro_xid = netdev->fcoe_ddp_xid;
                FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
                                lport->lro_xid);
        }

        mutex_unlock(&lport->lp_mutex);
}
/**
 * fcoe_netdev_config() - Set up net device for SW FCoE
* @lport: The local port that is associated with the net device
* @netdev: The associated net device
*
* Must be called after fcoe_lport_config() as it will use local port mutex
*
* Returns: 0 for success
*/
static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
{
        u32 mfs;
        u64 wwnn, wwpn;
        struct fcoe_interface *fcoe;
        struct fcoe_port *port;

        /* Setup lport private data to point to fcoe softc */
        port = lport_priv(lport);
        fcoe = port->priv;

        /*
         * Determine max frame size based on underlying device and optional
         * user-configured limit. If the MFS is too low, fcoe_link_ok()
         * will return 0, so do this first.
         */
        mfs = netdev->mtu;
        if (netdev->features & NETIF_F_FCOE_MTU) {
                mfs = FCOE_MTU;
                FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs);
        }
        /* subtract FCoE encapsulation overhead (header + CRC/EOF trailer) */
        mfs -= (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof));
        if (fc_set_mfs(lport, mfs))
                return -EINVAL;

        /* offload features support */
        fcoe_netdev_features_change(lport, netdev);

        skb_queue_head_init(&port->fcoe_pending_queue);
        port->fcoe_pending_queue_active = 0;
        setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);

        fcoe_link_speed_update(lport);

        /* WWNs: ask the LLD first, otherwise derive them from the
         * controller's MAC address (only for physical ports, not vports) */
        if (!lport->vport) {
                if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
                        wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
                fc_set_wwnn(lport, wwnn);
                if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
                        wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
                                                 2, 0);
                fc_set_wwpn(lport, wwpn);
        }

        return 0;
}
/**
* fcoe_shost_config() - Set up the SCSI host associated with a local port
* @lport: The local port
* @dev: The device associated with the SCSI host
*
* Must be called after fcoe_lport_config() and fcoe_netdev_config()
*
* Returns: 0 for success
*/
static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
{
        int rc = 0;

        /* lport scsi host config */
        lport->host->max_lun = FCOE_MAX_LUN;
        lport->host->max_id = FCOE_MAX_FCP_TARGET;
        lport->host->max_channel = 0;
        lport->host->max_cmd_len = FCOE_MAX_CMD_LEN;

        /* vports and physical ports use different transport templates */
        if (lport->vport)
                lport->host->transportt = fcoe_vport_scsi_transport;
        else
                lport->host->transportt = fcoe_nport_scsi_transport;

        /* add the new host to the SCSI-ml */
        rc = scsi_add_host(lport->host, dev);
        if (rc) {
                FCOE_NETDEV_DBG(fcoe_netdev(lport), "fcoe_shost_config: "
                                "error on scsi_add_host\n");
                return rc;
        }

        /* only physical ports can create NPIV vports */
        if (!lport->vport)
                fc_host_max_npiv_vports(lport->host) = USHRT_MAX;

        snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
                 "%s v%s over %s", FCOE_NAME, FCOE_VERSION,
                 fcoe_netdev(lport)->name);

        return 0;
}
/**
* fcoe_oem_match() - The match routine for the offloaded exchange manager
* @fp: The I/O frame
*
* This routine will be associated with an exchange manager (EM). When
* the libfc exchange handling code is looking for an EM to use it will
* call this routine and pass it the frame that it wishes to send. This
* routine will return True if the associated EM is to be used and False
 * if the exchange code should continue looking for an EM.
*
* The offload EM that this routine is associated with will handle any
* packets that are for SCSI read requests.
*
* This has been enhanced to work when FCoE stack is operating in target
* mode.
*
* Returns: True for read types I/O, otherwise returns false.
*/
bool fcoe_oem_match(struct fc_frame *fp)
{
        struct fc_frame_header *fh = fc_frame_header_get(fp);
        struct fcp_cmnd *fcp;

        /* initiator side: SCSI reads larger than the DDP threshold */
        if (fc_fcp_is_read(fr_fsp(fp)) &&
            (fr_fsp(fp)->data_len > fcoe_ddp_min))
                return true;
        /* target side: frame originating a new exchange (not a response,
         * rx_id still unassigned) that carries a large-enough FCP write */
        else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) {
                fcp = fc_frame_payload_get(fp, sizeof(*fcp));
                if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN &&
                    fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) &&
                    (fcp->fc_flags & FCP_CFL_WRDATA))
                        return true;
        }
        return false;
}
/**
* fcoe_em_config() - Allocate and configure an exchange manager
* @lport: The local port that the new EM will be associated with
*
* Returns: 0 on success
*/
static inline int fcoe_em_config(struct fc_lport *lport)
{
        struct fcoe_port *port = lport_priv(lport);
        struct fcoe_interface *fcoe = port->priv;
        struct fcoe_interface *oldfcoe = NULL;
        struct net_device *old_real_dev, *cur_real_dev;
        u16 min_xid = FCOE_MIN_XID;
        u16 max_xid = FCOE_MAX_XID;

        /*
         * Check if need to allocate an em instance for
         * offload exchange ids to be shared across all VN_PORTs/lport.
         */
        if (!lport->lro_enabled || !lport->lro_xid ||
            (lport->lro_xid >= max_xid)) {
                lport->lro_xid = 0;
                goto skip_oem;
        }

        /*
         * Reuse existing offload em instance in case
         * it is already allocated on real eth device
         */
        if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
                cur_real_dev = vlan_dev_real_dev(fcoe->netdev);
        else
                cur_real_dev = fcoe->netdev;

        /* scan the host list for another interface on the same real
         * device; if found, share its offload EM */
        list_for_each_entry(oldfcoe, &fcoe_hostlist, list) {
                if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
                        old_real_dev = vlan_dev_real_dev(oldfcoe->netdev);
                else
                        old_real_dev = oldfcoe->netdev;

                if (cur_real_dev == old_real_dev) {
                        fcoe->oem = oldfcoe->oem;
                        break;
                }
        }

        if (fcoe->oem) {
                if (!fc_exch_mgr_add(lport, fcoe->oem, fcoe_oem_match)) {
                        printk(KERN_ERR "fcoe_em_config: failed to add "
                               "offload em:%p on interface:%s\n",
                               fcoe->oem, fcoe->netdev->name);
                        return -ENOMEM;
                }
        } else {
                fcoe->oem = fc_exch_mgr_alloc(lport, FC_CLASS_3,
                                              FCOE_MIN_XID, lport->lro_xid,
                                              fcoe_oem_match);
                if (!fcoe->oem) {
                        printk(KERN_ERR "fcoe_em_config: failed to allocate "
                               "em for offload exches on interface:%s\n",
                               fcoe->netdev->name);
                        return -ENOMEM;
                }
        }

        /*
         * Exclude offload EM xid range from next EM xid range.
         */
        min_xid += lport->lro_xid + 1;

skip_oem:
        /* the regular (non-offload) EM covers the remaining xid range */
        if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, min_xid, max_xid, NULL)) {
                printk(KERN_ERR "fcoe_em_config: failed to "
                       "allocate em on interface %s\n", fcoe->netdev->name);
                return -ENOMEM;
        }

        return 0;
}
/**
* fcoe_if_destroy() - Tear down a SW FCoE instance
* @lport: The local port to be destroyed
*
*/
static void fcoe_if_destroy(struct fc_lport *lport)
{
        /* Free queued packets for the per-CPU receive threads */
        fcoe_percpu_clean(lport);

        /* Detach from the scsi-ml */
        fc_remove_host(lport->host);
        scsi_remove_host(lport->host);

        /* Destroy lport scsi_priv */
        fc_fcp_destroy(lport);

        /* There are no more rports or I/O, free the EM */
        fc_exch_mgr_free(lport);

        /* Free memory used by statistical counters */
        fc_lport_free_stats(lport);

        /* Release the Scsi_Host; last put frees the lport allocation */
        scsi_host_put(lport->host);
}
/**
* fcoe_ddp_setup() - Call a LLD's ddp_setup through the net device
* @lport: The local port to setup DDP for
* @xid: The exchange ID for this DDP transfer
* @sgl: The scatterlist describing this transfer
* @sgc: The number of sg items
*
* Returns: 0 if the DDP context was not configured
*/
/**
 * fcoe_ddp_setup() - Call a LLD's ddp_setup through the net device
 * @lport: The local port to setup DDP for
 * @xid: The exchange ID for this DDP transfer
 * @sgl: The scatterlist describing this transfer
 * @sgc: The number of sg items
 *
 * Returns: 0 if the DDP context was not configured
 */
static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid,
                          struct scatterlist *sgl, unsigned int sgc)
{
        struct net_device *netdev = fcoe_netdev(lport);
        const struct net_device_ops *ops = netdev->netdev_ops;

        if (!ops->ndo_fcoe_ddp_setup)
                return 0;

        return ops->ndo_fcoe_ddp_setup(netdev, xid, sgl, sgc);
}
/**
* fcoe_ddp_done() - Call a LLD's ddp_done through the net device
* @lport: The local port to complete DDP on
* @xid: The exchange ID for this DDP transfer
*
* Returns: the length of data that have been completed by DDP
*/
/**
 * fcoe_ddp_done() - Call a LLD's ddp_done through the net device
 * @lport: The local port to complete DDP on
 * @xid: The exchange ID for this DDP transfer
 *
 * Returns: the length of data that have been completed by DDP
 */
static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
{
        struct net_device *netdev = fcoe_netdev(lport);
        const struct net_device_ops *ops = netdev->netdev_ops;

        if (!ops->ndo_fcoe_ddp_done)
                return 0;

        return ops->ndo_fcoe_ddp_done(netdev, xid);
}
/**
 * fcoe_if_create() - Create a FCoE instance on an interface
 * @fcoe: The FCoE interface to create a local port on
 * @parent: The device pointer to be the parent in sysfs for the SCSI host
 * @npiv: Indicates if the port is a vport or not
 *
 * Creates a fc_lport instance and a Scsi_Host instance and configure them.
 *
 * Locking: relies on fcoe_config_mutex being held by the caller so that
 * EM allocation and hostlist updates stay atomic (see comment below).
 *
 * Returns: The allocated fc_lport or an error pointer
 */
static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
				       struct device *parent, int npiv)
{
	struct net_device *netdev = fcoe->netdev;
	struct fc_lport *lport, *n_port;
	struct fcoe_port *port;
	struct Scsi_Host *shost;
	int rc;
	/*
	 * parent is only a vport if npiv is 1,
	 * but we'll only use vport in that case so go ahead and set it
	 */
	struct fc_vport *vport = dev_to_vport(parent);

	FCOE_NETDEV_DBG(netdev, "Create Interface\n");

	/* Allocate the host: a regular lport for the master N_Port, an
	 * NPIV vport child otherwise; both reserve a struct fcoe_port. */
	if (!npiv)
		lport = libfc_host_alloc(&fcoe_shost_template, sizeof(*port));
	else
		lport = libfc_vport_create(vport, sizeof(*port));

	if (!lport) {
		FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
		rc = -ENOMEM;
		goto out;
	}
	port = lport_priv(lport);
	port->lport = lport;
	port->priv = fcoe;
	port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH;
	port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH;
	INIT_WORK(&port->destroy_work, fcoe_destroy_work);

	/* configure a fc_lport including the exchange manager */
	rc = fcoe_lport_config(lport);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
				"interface\n");
		goto out_host_put;
	}

	if (npiv) {
		/* vports inherit their WWNN/WWPN from the fc_vport */
		FCOE_NETDEV_DBG(netdev, "Setting vport names, "
				"%16.16llx %16.16llx\n",
				vport->node_name, vport->port_name);
		fc_set_wwnn(lport, vport->node_name);
		fc_set_wwpn(lport, vport->port_name);
	}

	/* configure lport network properties */
	rc = fcoe_netdev_config(lport, netdev);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
				"interface\n");
		goto out_lp_destroy;
	}

	/* configure lport scsi host properties */
	rc = fcoe_shost_config(lport, parent);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
				"interface\n");
		goto out_lp_destroy;
	}

	/* Initialize the library */
	rc = fcoe_libfc_config(lport, &fcoe->ctlr, &fcoe_libfc_fcn_templ, 1);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
				"interface\n");
		goto out_lp_destroy;
	}

	/*
	 * fcoe_em_alloc() and fcoe_hostlist_add() both
	 * need to be atomic with respect to other changes to the
	 * hostlist since fcoe_em_alloc() looks for an existing EM
	 * instance on host list updated by fcoe_hostlist_add().
	 *
	 * This is currently handled through the fcoe_config_mutex
	 * being held.
	 */
	if (!npiv)
		/* lport exch manager allocation */
		rc = fcoe_em_config(lport);
	else {
		/* NPIV ports share the exchange manager list of their
		 * parent N_Port, reached via the parent's Scsi_Host. */
		shost = vport_to_shost(vport);
		n_port = shost_priv(shost);
		rc = fc_exch_mgr_list_clone(n_port, lport);
	}

	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure the EM\n");
		goto out_lp_destroy;
	}

	/* the new lport now holds a reference on the fcoe interface */
	fcoe_interface_get(fcoe);
	return lport;

out_lp_destroy:
	fc_exch_mgr_free(lport);
out_host_put:
	scsi_host_put(lport->host);
out:
	return ERR_PTR(rc);
}
/**
 * fcoe_if_init() - Initialization routine for fcoe.ko
 *
 * Attaches the SW FCoE transport to the FC transport.  Both the N_Port
 * and the vport SCSI transport templates must attach; previously only
 * the N_Port attach was checked, so a failed vport attach left
 * fcoe_vport_scsi_transport NULL (crash on later vport creation) and a
 * failed N_Port attach leaked the vport transport.
 *
 * Returns: 0 on success, -ENODEV if either transport fails to attach
 */
static int __init fcoe_if_init(void)
{
	/* attach to scsi transport */
	fcoe_nport_scsi_transport =
		fc_attach_transport(&fcoe_nport_fc_functions);
	fcoe_vport_scsi_transport =
		fc_attach_transport(&fcoe_vport_fc_functions);

	if (!fcoe_nport_scsi_transport || !fcoe_vport_scsi_transport) {
		printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
		/* release whichever one (if any) did attach */
		if (fcoe_nport_scsi_transport)
			fc_release_transport(fcoe_nport_scsi_transport);
		if (fcoe_vport_scsi_transport)
			fc_release_transport(fcoe_vport_scsi_transport);
		fcoe_nport_scsi_transport = NULL;
		fcoe_vport_scsi_transport = NULL;
		return -ENODEV;
	}

	return 0;
}
/**
 * fcoe_if_exit() - Tear down fcoe.ko
 *
 * Detaches the SW FCoE transport from the FC transport: releases both
 * the N_Port and vport SCSI transport templates attached by
 * fcoe_if_init() and clears the module-level pointers.
 *
 * Returns: 0 on success
 */
int __exit fcoe_if_exit(void)
{
	fc_release_transport(fcoe_nport_scsi_transport);
	fcoe_nport_scsi_transport = NULL;

	fc_release_transport(fcoe_vport_scsi_transport);
	fcoe_vport_scsi_transport = NULL;

	return 0;
}
/**
 * fcoe_percpu_thread_create() - Create a receive thread for an online CPU
 * @cpu: The CPU index of the CPU to create a receive thread for
 *
 * Spawns a kthread bound to @cpu, wakes it, and publishes it under the
 * per-CPU rx-list lock.  On kthread_create() failure the per-CPU thread
 * pointer is simply left NULL.
 */
static void fcoe_percpu_thread_create(unsigned int cpu)
{
	struct fcoe_percpu_s *pcpu = &per_cpu(fcoe_percpu, cpu);
	struct task_struct *task;

	task = kthread_create(fcoe_percpu_receive_thread,
			      (void *)pcpu, "fcoethread/%d", cpu);
	if (IS_ERR(task))
		return;

	kthread_bind(task, cpu);
	wake_up_process(task);

	/* publish under the rx-list lock so fcoe_rcv() sees it safely */
	spin_lock_bh(&pcpu->fcoe_rx_list.lock);
	pcpu->thread = task;
	spin_unlock_bh(&pcpu->fcoe_rx_list.lock);
}
/**
 * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU
 * @cpu: The CPU index of the CPU whose receive thread is to be destroyed
 *
 * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
 * current CPU's Rx thread. If the thread being destroyed is bound to
 * the CPU processing this context the skbs will be freed.
 */
static void fcoe_percpu_thread_destroy(unsigned int cpu)
{
	struct fcoe_percpu_s *p;
	struct task_struct *thread;
	struct page *crc_eof;
	struct sk_buff *skb;
#ifdef CONFIG_SMP
	struct fcoe_percpu_s *p0;
	unsigned targ_cpu = get_cpu();	/* also disables preemption */
#endif /* CONFIG_SMP */

	FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);

	/* Prevent any new skbs from being queued for this CPU. */
	p = &per_cpu(fcoe_percpu, cpu);
	spin_lock_bh(&p->fcoe_rx_list.lock);
	thread = p->thread;
	p->thread = NULL;
	crc_eof = p->crc_eof_page;
	p->crc_eof_page = NULL;
	p->crc_eof_offset = 0;
	spin_unlock_bh(&p->fcoe_rx_list.lock);

#ifdef CONFIG_SMP
	/*
	 * Don't bother moving the skb's if this context is running
	 * on the same CPU that is having its thread destroyed. This
	 * can easily happen when the module is removed.
	 */
	if (cpu != targ_cpu) {
		p0 = &per_cpu(fcoe_percpu, targ_cpu);
		spin_lock_bh(&p0->fcoe_rx_list.lock);
		if (p0->thread) {
			FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
				 cpu, targ_cpu);
			/* re-queue the dying CPU's backlog on this CPU */
			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
				__skb_queue_tail(&p0->fcoe_rx_list, skb);
			spin_unlock_bh(&p0->fcoe_rx_list.lock);
		} else {
			/*
			 * The targeted CPU is not initialized and cannot accept
			 * new skbs. Unlock the targeted CPU and drop the skbs
			 * on the CPU that is going offline.
			 */
			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
				kfree_skb(skb);
			spin_unlock_bh(&p0->fcoe_rx_list.lock);
		}
	} else {
		/*
		 * This scenario occurs when the module is being removed
		 * and all threads are being destroyed. skbs will continue
		 * to be shifted from the CPU thread that is being removed
		 * to the CPU thread associated with the CPU that is processing
		 * the module removal. Once there is only one CPU Rx thread it
		 * will reach this case and we will drop all skbs and later
		 * stop the thread.
		 */
		spin_lock_bh(&p->fcoe_rx_list.lock);
		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
			kfree_skb(skb);
		spin_unlock_bh(&p->fcoe_rx_list.lock);
	}
	put_cpu();
#else
	/*
	 * This a non-SMP scenario where the singular Rx thread is
	 * being removed. Free all skbs and stop the thread.
	 */
	spin_lock_bh(&p->fcoe_rx_list.lock);
	while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
		kfree_skb(skb);
	spin_unlock_bh(&p->fcoe_rx_list.lock);
#endif

	/* stop the thread and release the crc_eof page outside any lock */
	if (thread)
		kthread_stop(thread);

	if (crc_eof)
		put_page(crc_eof);
}
/**
 * fcoe_cpu_callback() - Handler for CPU hotplug events
 * @nfb:    The callback data block
 * @action: The event triggering the callback
 * @hcpu:   The index of the CPU that the event is for
 *
 * Creates the per-CPU Rx thread when a CPU comes online and destroys
 * it when the CPU goes away; every other event is ignored.
 *
 * Returns NOTIFY_OK always.
 */
static int fcoe_cpu_callback(struct notifier_block *nfb,
			     unsigned long action, void *hcpu)
{
	unsigned cpu = (unsigned long)hcpu;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
		FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
		fcoe_percpu_thread_create(cpu);
	} else if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
		fcoe_percpu_thread_destroy(cpu);
	}

	return NOTIFY_OK;
}
/**
 * fcoe_rcv() - Receive packets from a net device
 * @skb: The received packet
 * @netdev: The net device that the packet was received on
 * @ptype: The packet type context
 * @olddev: The last device net device
 *
 * This routine is called by NET_RX_SOFTIRQ. It receives a packet, builds a
 * FC frame and passes the frame to libfc.
 *
 * Returns: 0 for success, -1 when the frame is dropped (skb is freed)
 */
int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
	     struct packet_type *ptype, struct net_device *olddev)
{
	struct fc_lport *lport;
	struct fcoe_rcv_info *fr;
	struct fcoe_interface *fcoe;
	struct fc_frame_header *fh;
	struct fcoe_percpu_s *fps;
	struct ethhdr *eh;
	unsigned int cpu;

	fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
	lport = fcoe->ctlr.lp;
	if (unlikely(!lport)) {
		FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
		goto err2;
	}
	if (!lport->link_up)
		goto err2;

	FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p "
			"data:%p tail:%p end:%p sum:%d dev:%s",
			skb->len, skb->data_len, skb->head, skb->data,
			skb_tail_pointer(skb), skb_end_pointer(skb),
			skb->csum, skb->dev ? skb->dev->name : "<NULL>");

	eh = eth_hdr(skb);

	/* in FIP mode, only accept frames sourced from the selected FCF */
	if (is_fip_mode(&fcoe->ctlr) &&
	    compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) {
		FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
				eh->h_source);
		goto err;
	}

	/*
	 * Check for minimum frame length, and make sure required FCoE
	 * and FC headers are pulled into the linear data area.
	 */
	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
		     !pskb_may_pull(skb, FCOE_HEADER_LEN)))
		goto err;

	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
	fh = (struct fc_frame_header *) skb_transport_header(skb);

	/* the low 24 bits of the dest MAC must match the FC d_id */
	if (ntoh24(&eh->h_dest[3]) != ntoh24(fh->fh_d_id)) {
		FCOE_NETDEV_DBG(netdev, "FC frame d_id mismatch with MAC:%pM\n",
				eh->h_dest);
		goto err;
	}

	fr = fcoe_dev_from_skb(skb);
	fr->fr_dev = lport;
	fr->ptype = ptype;

	/*
	 * In case the incoming frame's exchange is originated from
	 * the initiator, then received frame's exchange id is ANDed
	 * with fc_cpu_mask bits to get the same cpu on which exchange
	 * was originated, otherwise just use the current cpu.
	 */
	if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
		cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
	else
		cpu = smp_processor_id();

	fps = &per_cpu(fcoe_percpu, cpu);
	spin_lock_bh(&fps->fcoe_rx_list.lock);
	if (unlikely(!fps->thread)) {
		/*
		 * The targeted CPU is not ready, let's target
		 * the first CPU now. For non-SMP systems this
		 * will check the same CPU twice.
		 */
		FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread "
				"ready for incoming skb- using first online "
				"CPU.\n");
		spin_unlock_bh(&fps->fcoe_rx_list.lock);
		cpu = cpumask_first(cpu_online_mask);
		fps = &per_cpu(fcoe_percpu, cpu);
		spin_lock_bh(&fps->fcoe_rx_list.lock);
		if (!fps->thread) {
			spin_unlock_bh(&fps->fcoe_rx_list.lock);
			goto err;
		}
	}

	/*
	 * We now have a valid CPU that we're targeting for
	 * this skb. We also have this receive thread locked,
	 * so we're free to queue skbs into it's queue.
	 */

	/* If this is a SCSI-FCP frame, and this is already executing on the
	 * correct CPU, and the queue for this CPU is empty, then go ahead
	 * and process the frame directly in the softirq context.
	 * This lets us process completions without context switching from the
	 * NET_RX softirq, to our receive processing thread, and then back to
	 * BLOCK softirq context.
	 */
	if (fh->fh_type == FC_TYPE_FCP &&
	    cpu == smp_processor_id() &&
	    skb_queue_empty(&fps->fcoe_rx_list)) {
		spin_unlock_bh(&fps->fcoe_rx_list.lock);
		fcoe_recv_frame(skb);
	} else {
		__skb_queue_tail(&fps->fcoe_rx_list, skb);
		/* only wake the thread on the empty->non-empty transition */
		if (fps->fcoe_rx_list.qlen == 1)
			wake_up_process(fps->thread);
		spin_unlock_bh(&fps->fcoe_rx_list.lock);
	}
	return 0;
err:
	per_cpu_ptr(lport->dev_stats, get_cpu())->ErrorFrames++;
	put_cpu();
err2:
	kfree_skb(skb);
	return -1;
}
/**
 * fcoe_alloc_paged_crc_eof() - Allocate a page to be used for the trailer CRC
 * @skb:  The packet to be transmitted
 * @tlen: The total length of the trailer
 *
 * Grabs this CPU's fcoe_percpu context (preemption disabled for the
 * duration) and delegates to fcoe_get_paged_crc_eof().
 *
 * Returns: 0 for success
 */
static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
{
	struct fcoe_percpu_s *pcpu = &get_cpu_var(fcoe_percpu);
	int rc = fcoe_get_paged_crc_eof(skb, tlen, pcpu);

	put_cpu_var(fcoe_percpu);
	return rc;
}
/**
 * fcoe_xmit() - Transmit a FCoE frame
 * @lport: The local port that the frame is to be transmitted for
 * @fp:    The frame to be transmitted
 *
 * Wraps the FC frame in Ethernet + FCoE headers, appends the CRC/EOF
 * trailer, then hands the skb to the LLD (or the pending queue).
 *
 * Return: 0 for success (including silent drop when the link is down),
 *         -ENOMEM if the paged trailer cannot be allocated
 */
int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
{
	int wlen;
	u32 crc;
	struct ethhdr *eh;
	struct fcoe_crc_eof *cp;
	struct sk_buff *skb;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	unsigned int hlen;		/* header length implies the version */
	unsigned int tlen;		/* trailer length */
	unsigned int elen;		/* eth header, may include vlan */
	struct fcoe_port *port = lport_priv(lport);
	struct fcoe_interface *fcoe = port->priv;
	u8 sof, eof;
	struct fcoe_hdr *hp;

	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);
	wlen = skb->len / FCOE_WORD_TO_BYTE;

	/* link down: drop silently, caller treats as success */
	if (!lport->link_up) {
		kfree_skb(skb);
		return 0;
	}

	/* FIP may consume ELS frames (e.g. FLOGI) instead of us */
	if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
	    fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
		return 0;

	sof = fr_sof(fp);
	eof = fr_eof(fp);

	elen = sizeof(struct ethhdr);
	hlen = sizeof(struct fcoe_hdr);
	tlen = sizeof(struct fcoe_crc_eof);
	/* word count excludes the trailer but includes the CRC itself */
	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;

	/* crc offload */
	if (likely(lport->crc_offload)) {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_headroom(skb);
		skb->csum_offset = skb->len;
		crc = 0;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		crc = fcoe_fc_crc(fp);
	}

	/* copy port crc and eof to the skb buff */
	if (skb_is_nonlinear(skb)) {
		skb_frag_t *frag;
		if (fcoe_alloc_paged_crc_eof(skb, tlen)) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		/* trailer lives in the last (just-added) fragment page */
		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
		cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
			+ frag->page_offset;
	} else {
		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
	}

	memset(cp, 0, sizeof(*cp));
	cp->fcoe_eof = eof;
	cp->fcoe_crc32 = cpu_to_le32(~crc);

	if (skb_is_nonlinear(skb)) {
		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
		cp = NULL;
	}

	/* adjust skb network/transport offsets to match mac/fcoe/port */
	skb_push(skb, elen + hlen);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->mac_len = elen;
	skb->protocol = htons(ETH_P_FCOE);
	skb->dev = fcoe->netdev;

	/* fill up mac and fcoe headers */
	eh = eth_hdr(skb);
	eh->h_proto = htons(ETH_P_FCOE);
	memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
	if (fcoe->ctlr.map_dest)
		/* map_dest mode: encode the FC d_id in the low MAC bytes */
		memcpy(eh->h_dest + 3, fh->fh_d_id, 3);

	/* until FLOGI completes, transmit from the control MAC */
	if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
		memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
	else
		memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);

	hp = (struct fcoe_hdr *)(eh + 1);
	memset(hp, 0, sizeof(*hp));
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
	hp->fcoe_sof = sof;

	/* fcoe lso, mss is in max_payload which is non-zero for FCP data */
	if (lport->seq_offload && fr_max_payload(fp)) {
		skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
		skb_shinfo(skb)->gso_size = fr_max_payload(fp);
	} else {
		skb_shinfo(skb)->gso_type = 0;
		skb_shinfo(skb)->gso_size = 0;
	}

	/* update tx stats: regardless if LLD fails */
	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
	stats->TxFrames++;
	stats->TxWords += wlen;
	put_cpu();

	/* send down to lld */
	fr_dev(fp) = lport;
	if (port->fcoe_pending_queue.qlen)
		fcoe_check_wait_queue(lport, skb);
	else if (fcoe_start_io(skb))
		/* LLD refused the skb: queue it for later */
		fcoe_check_wait_queue(lport, skb);

	return 0;
}
/**
 * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion
 * @skb: The completed skb (argument required by destructor)
 *
 * Destructor attached to the sentinel skb queued by fcoe_percpu_clean();
 * the skb's content is irrelevant -- its destruction is the signal.
 */
static void fcoe_percpu_flush_done(struct sk_buff *skb)
{
	complete(&fcoe_flush_completion);
}
/**
* fcoe_filter_frames() - filter out bad fcoe frames, i.e. bad CRC
* @lport: The local port the frame was received on
* @fp: The received frame
*
* Return: 0 on passing filtering checks
*/
static inline int fcoe_filter_frames(struct fc_lport *lport,
struct fc_frame *fp)
{
struct fcoe_interface *fcoe;
struct fc_frame_header *fh;
struct sk_buff *skb = (struct sk_buff *)fp;
struct fcoe_dev_stats *stats;
/*
* We only check CRC if no offload is available and if it is
* it's solicited data, in which case, the FCP layer would
* check it during the copy.
*/
if (lport->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
else
fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
fh = (struct fc_frame_header *) skb_transport_header(skb);
fh = fc_frame_header_get(fp);
if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP)
return 0;
fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
return -EINVAL;
}
if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED) ||
le32_to_cpu(fr_crc(fp)) == ~crc32(~0, skb->data, skb->len)) {
fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
return 0;
}
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
stats->InvalidCRCCount++;
if (stats->InvalidCRCCount < 5)
printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
return -EINVAL;
}
/**
 * fcoe_recv_frame() - process a single received frame
 * @skb: frame to process
 *
 * Decapsulates the FCoE header and CRC/EOF trailer, updates Rx stats,
 * and delivers the resulting FC frame to libfc via fc_exch_recv().
 * Bad frames (version mismatch, truncated trailer, filter rejection)
 * are counted and freed.
 */
static void fcoe_recv_frame(struct sk_buff *skb)
{
	u32 fr_len;
	struct fc_lport *lport;
	struct fcoe_rcv_info *fr;
	struct fcoe_dev_stats *stats;
	struct fcoe_crc_eof crc_eof;
	struct fc_frame *fp;
	struct fcoe_port *port;
	struct fcoe_hdr *hp;

	fr = fcoe_dev_from_skb(skb);
	lport = fr->fr_dev;
	if (unlikely(!lport)) {
		/* flush sentinels (see fcoe_percpu_clean) have no lport */
		if (skb->destructor != fcoe_percpu_flush_done)
			FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
		kfree_skb(skb);
		return;
	}

	FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
			"head:%p data:%p tail:%p end:%p sum:%d dev:%s",
			skb->len, skb->data_len,
			skb->head, skb->data, skb_tail_pointer(skb),
			skb_end_pointer(skb), skb->csum,
			skb->dev ? skb->dev->name : "<NULL>");

	port = lport_priv(lport);
	if (skb_is_nonlinear(skb))
		skb_linearize(skb);	/* not ideal */

	/*
	 * Frame length checks and setting up the header pointers
	 * was done in fcoe_rcv already.
	 */
	hp = (struct fcoe_hdr *) skb_network_header(skb);

	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
	if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
		/* rate-limit the warning by the error counter */
		if (stats->ErrorFrames < 5)
			printk(KERN_WARNING "fcoe: FCoE version "
			       "mismatch: The frame has "
			       "version %x, but the "
			       "initiator supports version "
			       "%x\n", FC_FCOE_DECAPS_VER(hp),
			       FC_FCOE_VER);
		goto drop;
	}

	skb_pull(skb, sizeof(struct fcoe_hdr));
	fr_len = skb->len - sizeof(struct fcoe_crc_eof);
	stats->RxFrames++;
	stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_dev(fp) = lport;
	fr_sof(fp) = hp->fcoe_sof;

	/* Copy out the CRC and EOF trailer for access */
	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof)))
		goto drop;
	fr_eof(fp) = crc_eof.fcoe_eof;
	fr_crc(fp) = crc_eof.fcoe_crc32;
	/* strip the trailer now that it has been captured */
	if (pskb_trim(skb, fr_len))
		goto drop;

	if (!fcoe_filter_frames(lport, fp)) {
		put_cpu();
		fc_exch_recv(lport, fp);
		return;
	}
drop:
	stats->ErrorFrames++;
	put_cpu();
	kfree_skb(skb);
}
/**
 * fcoe_percpu_receive_thread() - The per-CPU packet receive thread
 * @arg: The per-CPU context (struct fcoe_percpu_s *)
 *
 * Dequeues skbs from this CPU's rx list and processes them; sleeps
 * (TASK_INTERRUPTIBLE) while the list is empty.  The state is set
 * before dropping the list lock so a concurrent wake_up_process()
 * from fcoe_rcv() cannot be lost.
 *
 * Return: 0 for success
 */
int fcoe_percpu_receive_thread(void *arg)
{
	struct fcoe_percpu_s *p = arg;
	struct sk_buff *skb;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {

		spin_lock_bh(&p->fcoe_rx_list.lock);
		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_bh(&p->fcoe_rx_list.lock);
			schedule();
			set_current_state(TASK_RUNNING);
			/* check for stop after each wakeup */
			if (kthread_should_stop())
				return 0;
			spin_lock_bh(&p->fcoe_rx_list.lock);
		}
		spin_unlock_bh(&p->fcoe_rx_list.lock);
		fcoe_recv_frame(skb);
	}
	return 0;
}
/**
 * fcoe_dev_setup() - Setup the link change notification interface
 *
 * Registers fcoe_notifier so fcoe_device_notification() receives
 * netdev events.
 *
 * NOTE(review): the return value of register_netdevice_notifier() is
 * ignored here, so a registration failure would go unnoticed -- confirm
 * this is acceptable for this kernel version.
 */
static void fcoe_dev_setup(void)
{
	register_netdevice_notifier(&fcoe_notifier);
}
/**
 * fcoe_dev_cleanup() - Cleanup the link change notification interface
 *
 * Unregisters the fcoe_notifier installed by fcoe_dev_setup().
 */
static void fcoe_dev_cleanup(void)
{
	unregister_netdevice_notifier(&fcoe_notifier);
}
/**
 * fcoe_device_notification() - Handler for net device events
 * @notifier: The context of the notification
 * @event:    The type of event
 * @ptr:      The net device that the event was on
 *
 * This function is called by the Ethernet driver in case of link change
 * event.  It updates the lport's view of link speed and state, and on
 * NETDEV_UNREGISTER schedules destruction of the port.
 *
 * Fix: removed the unreachable `break` that followed `goto out` in the
 * NETDEV_UNREGISTER case (dead code).
 *
 * Returns: NOTIFY_OK if the event was for one of our netdevs,
 *          NOTIFY_DONE otherwise
 */
static int fcoe_device_notification(struct notifier_block *notifier,
				    ulong event, void *ptr)
{
	struct fc_lport *lport = NULL;
	struct net_device *netdev = ptr;
	struct fcoe_interface *fcoe;
	struct fcoe_port *port;
	struct fcoe_dev_stats *stats;
	u32 link_possible = 1;
	u32 mfs;
	int rc = NOTIFY_OK;

	/* find the fcoe interface (if any) bound to this netdev */
	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
		if (fcoe->netdev == netdev) {
			lport = fcoe->ctlr.lp;
			break;
		}
	}
	if (!lport) {
		rc = NOTIFY_DONE;
		goto out;
	}

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_GOING_DOWN:
		link_possible = 0;
		break;
	case NETDEV_UP:
	case NETDEV_CHANGE:
		break;
	case NETDEV_CHANGEMTU:
		/* FCoE-MTU-capable devices need no mfs adjustment */
		if (netdev->features & NETIF_F_FCOE_MTU)
			break;
		mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
				     sizeof(struct fcoe_crc_eof));
		if (mfs >= FC_MIN_MAX_FRAME)
			fc_set_mfs(lport, mfs);
		break;
	case NETDEV_REGISTER:
		break;
	case NETDEV_UNREGISTER:
		/* tear the port down asynchronously on the fcoe workqueue */
		list_del(&fcoe->list);
		port = lport_priv(fcoe->ctlr.lp);
		fcoe_interface_cleanup(fcoe);
		queue_work(fcoe_wq, &port->destroy_work);
		goto out;
	case NETDEV_FEAT_CHANGE:
		fcoe_netdev_features_change(lport, netdev);
		break;
	default:
		FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
				"from netdev netlink\n", event);
	}

	fcoe_link_speed_update(lport);

	if (link_possible && !fcoe_link_ok(lport))
		fcoe_ctlr_link_up(&fcoe->ctlr);
	else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
		stats = per_cpu_ptr(lport->dev_stats, get_cpu());
		stats->LinkFailureCount++;
		put_cpu();
		fcoe_clean_pending_queue(lport);
	}
out:
	return rc;
}
/**
 * fcoe_disable() - Disables a FCoE interface
 * @netdev: The net_device object the Ethernet interface to create on
 *
 * Called from fcoe transport.  Takes the FIP controller link down and
 * drains the lport's pending queue.
 *
 * Returns: 0 for success, -ENODEV when no FCoE instance exists on @netdev
 */
static int fcoe_disable(struct net_device *netdev)
{
	struct fcoe_interface *fcoe;
	int rc = 0;

	mutex_lock(&fcoe_config_mutex);

	rtnl_lock();
	fcoe = fcoe_hostlist_lookup_port(netdev);
	rtnl_unlock();

	if (!fcoe) {
		rc = -ENODEV;
	} else {
		fcoe_ctlr_link_down(&fcoe->ctlr);
		fcoe_clean_pending_queue(fcoe->ctlr.lp);
	}

	mutex_unlock(&fcoe_config_mutex);
	return rc;
}
/**
 * fcoe_enable() - Enables a FCoE interface
 * @netdev: The net_device object the Ethernet interface to create on
 *
 * Called from fcoe transport.  Brings the FIP controller link up again
 * if the physical link is OK.
 *
 * Returns: 0 for success, -ENODEV when no FCoE instance exists on @netdev
 */
static int fcoe_enable(struct net_device *netdev)
{
	struct fcoe_interface *fcoe;
	int rc = 0;

	mutex_lock(&fcoe_config_mutex);

	rtnl_lock();
	fcoe = fcoe_hostlist_lookup_port(netdev);
	rtnl_unlock();

	if (fcoe) {
		if (!fcoe_link_ok(fcoe->ctlr.lp))
			fcoe_ctlr_link_up(&fcoe->ctlr);
	} else {
		rc = -ENODEV;
	}

	mutex_unlock(&fcoe_config_mutex);
	return rc;
}
/**
 * fcoe_destroy() - Destroy a FCoE interface
 * @netdev: The net_device object the Ethernet interface to create on
 *
 * Called from fcoe transport.  Unhooks the interface from the hostlist
 * and netdev under RTNL, then destroys the lport outside RTNL (the
 * destroy path may sleep / take other locks).
 *
 * Returns: 0 for success, -ENODEV when no FCoE instance exists on @netdev
 */
static int fcoe_destroy(struct net_device *netdev)
{
	struct fcoe_interface *fcoe;
	struct fc_lport *lport;
	int rc = 0;

	mutex_lock(&fcoe_config_mutex);
	rtnl_lock();
	fcoe = fcoe_hostlist_lookup_port(netdev);
	if (!fcoe) {
		rtnl_unlock();
		rc = -ENODEV;
		goto out_nodev;
	}
	lport = fcoe->ctlr.lp;
	/* detach from the hostlist and netdev while RTNL is held */
	list_del(&fcoe->list);
	fcoe_interface_cleanup(fcoe);
	rtnl_unlock();
	/* the lport teardown itself happens outside RTNL */
	fcoe_if_destroy(lport);
out_nodev:
	mutex_unlock(&fcoe_config_mutex);
	return rc;
}
/**
 * fcoe_destroy_work() - Destroy a FCoE port in a deferred work context
 * @work: Handle to the FCoE port to be destroyed
 *
 * Runs on fcoe_wq; takes fcoe_config_mutex like the synchronous
 * destroy path does.
 */
static void fcoe_destroy_work(struct work_struct *work)
{
	struct fcoe_port *port = container_of(work, struct fcoe_port,
					      destroy_work);

	mutex_lock(&fcoe_config_mutex);
	fcoe_if_destroy(port->lport);
	mutex_unlock(&fcoe_config_mutex);
}
/**
 * fcoe_match() - Check if the FCoE is supported on the given netdevice
 * @netdev: The net_device object the Ethernet interface to create on
 *
 * Called from fcoe transport.
 *
 * Returns: always returns true as this is the default FCoE transport,
 * i.e., support all netdevs.
 */
static bool fcoe_match(struct net_device *netdev)
{
	/* the default SW transport accepts every netdev */
	return true;
}
/**
 * fcoe_create() - Create a fcoe interface
 * @netdev:   The net_device object the Ethernet interface to create on
 * @fip_mode: The FIP mode for this creation
 *
 * Called from fcoe transport.  Allocates the fcoe interface and the
 * master lport, adds it to the hostlist, and kicks off fabric login.
 * Runs entirely under fcoe_config_mutex and RTNL.
 *
 * Returns: 0 for success, -EEXIST / -EIO / fcoe_interface_create()'s
 *          error code on failure
 */
static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
{
	int rc;
	struct fcoe_interface *fcoe;
	struct fc_lport *lport;

	mutex_lock(&fcoe_config_mutex);
	rtnl_lock();

	/* look for existing lport */
	if (fcoe_hostlist_lookup(netdev)) {
		rc = -EEXIST;
		goto out_nodev;
	}

	fcoe = fcoe_interface_create(netdev, fip_mode);
	if (IS_ERR(fcoe)) {
		rc = PTR_ERR(fcoe);
		goto out_nodev;
	}

	/* npiv = 0: this is the master N_Port of the interface */
	lport = fcoe_if_create(fcoe, &netdev->dev, 0);
	if (IS_ERR(lport)) {
		printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
		       netdev->name);
		rc = -EIO;
		fcoe_interface_cleanup(fcoe);
		goto out_free;
	}

	/* Make this the "master" N_Port */
	fcoe->ctlr.lp = lport;

	/* add to lports list */
	fcoe_hostlist_add(lport);

	/* start FIP Discovery and FLOGI */
	lport->boot_time = jiffies;
	fc_fabric_login(lport);
	if (!fcoe_link_ok(lport))
		fcoe_ctlr_link_up(&fcoe->ctlr);

	/*
	 * Release from init in fcoe_interface_create(), on success lport
	 * should be holding a reference taken in fcoe_if_create().
	 */
	fcoe_interface_put(fcoe);
	rtnl_unlock();
	mutex_unlock(&fcoe_config_mutex);

	return 0;
out_free:
	fcoe_interface_put(fcoe);
out_nodev:
	rtnl_unlock();
	mutex_unlock(&fcoe_config_mutex);
	return rc;
}
/**
* fcoe_link_speed_update() - Update the supported and actual link speeds
* @lport: The local port to update speeds for
*
* Returns: 0 if the ethtool query was successful
* -1 if the ethtool query failed
*/
int fcoe_link_speed_update(struct fc_lport *lport)
{
struct net_device *netdev = fcoe_netdev(lport);
struct ethtool_cmd ecmd;
if (!dev_ethtool_get_settings(netdev, &ecmd)) {
lport->link_supported_speeds &=
~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
if (ecmd.supported & (SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full))
lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
if (ecmd.supported & SUPPORTED_10000baseT_Full)
lport->link_supported_speeds |=
FC_PORTSPEED_10GBIT;
switch (ethtool_cmd_speed(&ecmd)) {
case SPEED_1000:
lport->link_speed = FC_PORTSPEED_1GBIT;
break;
case SPEED_10000:
lport->link_speed = FC_PORTSPEED_10GBIT;
break;
}
return 0;
}
return -1;
}
/**
 * fcoe_link_ok() - Check if the link is OK for a local port
 * @lport: The local port to check link on
 *
 * Returns: 0 if link is UP and OK, -1 if not
 */
int fcoe_link_ok(struct fc_lport *lport)
{
	return netif_oper_up(fcoe_netdev(lport)) ? 0 : -1;
}
/**
 * fcoe_percpu_clean() - Clear all pending skbs for an local port
 * @lport: The local port whose skbs are to be cleared
 *
 * Must be called with fcoe_create_mutex held to single-thread completion.
 *
 * This flushes the pending skbs by adding a new skb to each queue and
 * waiting until they are all freed. This assures us that not only are
 * there no packets that will be handled by the lport, but also that any
 * threads already handling packet have returned.
 */
void fcoe_percpu_clean(struct fc_lport *lport)
{
	struct fcoe_percpu_s *pp;
	struct fcoe_rcv_info *fr;
	struct sk_buff_head *list;
	struct sk_buff *skb, *next;
	struct sk_buff *head;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		pp = &per_cpu(fcoe_percpu, cpu);
		spin_lock_bh(&pp->fcoe_rx_list.lock);
		list = &pp->fcoe_rx_list;
		head = list->next;
		/* drop every queued skb that belongs to this lport */
		for (skb = head; skb != (struct sk_buff *)list;
		     skb = next) {
			next = skb->next;
			fr = fcoe_dev_from_skb(skb);
			if (fr->fr_dev == lport) {
				__skb_unlink(skb, list);
				kfree_skb(skb);
			}
		}

		/* no thread to flush against on this CPU */
		if (!pp->thread || !cpu_online(cpu)) {
			spin_unlock_bh(&pp->fcoe_rx_list.lock);
			continue;
		}

		/* queue a sentinel skb whose destructor signals completion */
		skb = dev_alloc_skb(0);
		if (!skb) {
			spin_unlock_bh(&pp->fcoe_rx_list.lock);
			continue;
		}
		skb->destructor = fcoe_percpu_flush_done;

		__skb_queue_tail(&pp->fcoe_rx_list, skb);
		if (pp->fcoe_rx_list.qlen == 1)
			wake_up_process(pp->thread);
		spin_unlock_bh(&pp->fcoe_rx_list.lock);

		/* the thread frees the sentinel once the queue is drained */
		wait_for_completion(&fcoe_flush_completion);
	}
}
/**
* fcoe_reset() - Reset a local port
* @shost: The SCSI host associated with the local port to be reset
*
* Returns: Always 0 (return value required by FC transport template)
*/
int fcoe_reset(struct Scsi_Host *shost)
{
struct fc_lport *lport = shost_priv(shost);
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
fcoe_ctlr_link_down(&fcoe->ctlr);
fcoe_clean_pending_queue(fcoe->ctlr.lp);
if (!fcoe_link_ok(fcoe->ctlr.lp))
fcoe_ctlr_link_up(&fcoe->ctlr);
return 0;
}
/**
* fcoe_hostlist_lookup_port() - Find the FCoE interface associated with a net device
* @netdev: The net device used as a key
*
* Locking: Must be called with the RNL mutex held.
*
* Returns: NULL or the FCoE interface
*/
static struct fcoe_interface *
fcoe_hostlist_lookup_port(const struct net_device *netdev)
{
struct fcoe_interface *fcoe;
list_for_each_entry(fcoe, &fcoe_hostlist, list) {
if (fcoe->netdev == netdev)
return fcoe;
}
return NULL;
}
/**
 * fcoe_hostlist_lookup() - Find the local port associated with a
 *			    given net device
 * @netdev: The netdevice used as a key
 *
 * Locking: Must be called with the RTNL mutex held
 *
 * Returns: NULL or the local port
 */
static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
{
	struct fcoe_interface *fcoe = fcoe_hostlist_lookup_port(netdev);

	if (!fcoe)
		return NULL;
	return fcoe->ctlr.lp;
}
/**
 * fcoe_hostlist_add() - Add the FCoE interface identified by a local
 *			 port to the hostlist
 * @lport: The local port that identifies the FCoE interface to be added
 *
 * Locking: must be called with the RTNL mutex held
 *
 * Returns: 0 for success (adding an already-listed interface is a no-op)
 */
static int fcoe_hostlist_add(const struct fc_lport *lport)
{
	struct fcoe_interface *fcoe;
	struct fcoe_port *port;

	/* already on the hostlist? nothing to do */
	fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
	if (fcoe)
		return 0;

	port = lport_priv(lport);
	fcoe = port->priv;
	list_add_tail(&fcoe->list, &fcoe_hostlist);
	return 0;
}
/*
 * The default software FCoE transport: matches every netdev (see
 * fcoe_match()) and wires up the create/destroy/enable/disable entry
 * points invoked by the libfcoe transport layer.
 */
static struct fcoe_transport fcoe_sw_transport = {
	.name = {FCOE_TRANSPORT_DEFAULT},
	.attached = false,
	.list = LIST_HEAD_INIT(fcoe_sw_transport.list),
	.match = fcoe_match,
	.create = fcoe_create,
	.destroy = fcoe_destroy,
	.enable = fcoe_enable,
	.disable = fcoe_disable,
};
/**
 * fcoe_init() - Initialize fcoe.ko
 *
 * Fixes relative to the previous version:
 *  - fcoe_wq was leaked when fcoe_transport_attach() failed;
 *  - the out_free path left the sw transport attached and, when
 *    fcoe_if_init() failed, left both the hotcpu and netdevice
 *    notifiers registered.
 *
 * Returns: 0 on success, or a negative value on failure
 */
static int __init fcoe_init(void)
{
	struct fcoe_percpu_s *p;
	unsigned int cpu;
	int rc = 0;

	fcoe_wq = alloc_workqueue("fcoe", 0, 0);
	if (!fcoe_wq)
		return -ENOMEM;

	/* register as a fcoe transport */
	rc = fcoe_transport_attach(&fcoe_sw_transport);
	if (rc) {
		printk(KERN_ERR "failed to register an fcoe transport, check "
			"if libfcoe is loaded\n");
		destroy_workqueue(fcoe_wq);	/* was leaked before */
		return rc;
	}

	mutex_lock(&fcoe_config_mutex);

	for_each_possible_cpu(cpu) {
		p = &per_cpu(fcoe_percpu, cpu);
		skb_queue_head_init(&p->fcoe_rx_list);
	}

	for_each_online_cpu(cpu)
		fcoe_percpu_thread_create(cpu);

	/* Initialize per CPU interrupt thread */
	rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
	if (rc)
		goto out_free;

	/* Setup link change notification */
	fcoe_dev_setup();

	rc = fcoe_if_init();
	if (rc)
		goto out_notifier;

	mutex_unlock(&fcoe_config_mutex);
	return 0;

out_notifier:
	/* undo fcoe_dev_setup() and the hotcpu registration above */
	fcoe_dev_cleanup();
	unregister_hotcpu_notifier(&fcoe_cpu_notifier);
out_free:
	for_each_online_cpu(cpu) {
		fcoe_percpu_thread_destroy(cpu);
	}
	mutex_unlock(&fcoe_config_mutex);
	fcoe_transport_detach(&fcoe_sw_transport);
	destroy_workqueue(fcoe_wq);
	return rc;
}
module_init(fcoe_init);
/**
 * fcoe_exit() - Clean up fcoe.ko
 *
 * Tears everything down in dependency order: netdev notifier, live
 * interfaces (destroyed asynchronously on fcoe_wq), hotcpu notifier,
 * per-CPU threads, workqueue, SCSI transports, and finally the
 * libfcoe transport registration.
 *
 * Returns: 0 on success or a negative value on failure
 */
static void __exit fcoe_exit(void)
{
	struct fcoe_interface *fcoe, *tmp;
	struct fcoe_port *port;
	unsigned int cpu;

	mutex_lock(&fcoe_config_mutex);

	fcoe_dev_cleanup();

	/* releases the associated fcoe hosts */
	rtnl_lock();
	list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
		list_del(&fcoe->list);
		port = lport_priv(fcoe->ctlr.lp);
		fcoe_interface_cleanup(fcoe);
		queue_work(fcoe_wq, &port->destroy_work);
	}
	rtnl_unlock();

	unregister_hotcpu_notifier(&fcoe_cpu_notifier);

	for_each_online_cpu(cpu)
		fcoe_percpu_thread_destroy(cpu);

	mutex_unlock(&fcoe_config_mutex);

	/*
	 * destroy_work's may be chained but destroy_workqueue()
	 * can take care of them. Just kill the fcoe_wq.
	 */
	destroy_workqueue(fcoe_wq);

	/*
	 * Detaching from the scsi transport must happen after all
	 * destroys are done on the fcoe_wq. destroy_workqueue will
	 * ensure the fcoe_wq is flushed.
	 */
	fcoe_if_exit();

	/* detach from fcoe transport */
	fcoe_transport_detach(&fcoe_sw_transport);
}
module_exit(fcoe_exit);
/**
 * fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler
 * @seq: active sequence in the FLOGI or FDISC exchange
 * @fp:  response frame, or error encoded in a pointer (timeout)
 * @arg: pointer to the fcoe_ctlr structure
 *
 * This handles MAC address management for FCoE, then passes control on to
 * the libfc FLOGI response handler.
 */
static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;
	u8 *mac;

	/* errors (e.g. timeout) go straight to the libfc handler */
	if (IS_ERR(fp))
		goto done;

	mac = fr_cb(fp)->granted_mac;
	if (is_zero_ether_addr(mac)) {
		/* pre-FIP: let the controller derive the MAC from the frame */
		if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
			fc_frame_free(fp);
			return;
		}
	}
	fcoe_update_src_mac(lport, mac);
done:
	fc_lport_flogi_resp(seq, fp, lport);
}
/**
 * fcoe_logo_resp() - FCoE specific LOGO response handler
 * @seq: active sequence in the LOGO exchange
 * @fp: response frame, or error encoded in a pointer (timeout)
 * @arg: pointer to the fc_lport structure
 *
 * On a successful fabric logout the FCoE source MAC is reset to zero
 * before handing control to the libfc LOGO response handler.
 */
static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	static u8 null_mac[ETH_ALEN] = { 0 };
	struct fc_lport *lport = arg;

	if (!IS_ERR(fp))
		fcoe_update_src_mac(lport, null_mac);

	fc_lport_logo_resp(seq, fp, lport);
}
/**
 * fcoe_elsct_send() - FCoE specific ELS handler
 * @lport: local port the exchange runs on
 * @did: destination ID
 * @fp: frame to send
 * @op: ELS/CT opcode
 * @resp: caller's response handler
 * @arg: argument for @resp
 * @timeout: exchange timeout
 *
 * This does special case handling of FIP encapsulated ELS exchanges for
 * FCoE, using FCoE specific response handlers and passing the FIP
 * controller as the argument (the lport is still available from the
 * exchange).  Most of the work here is just handed off to the libfc
 * routine.
 */
static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
				      struct fc_frame *fp, unsigned int op,
				      void (*resp)(struct fc_seq *,
						   struct fc_frame *,
						   void *),
				      void *arg, u32 timeout)
{
	struct fcoe_port *port = lport_priv(lport);
	struct fcoe_interface *fcoe = port->priv;
	struct fcoe_ctlr *fip = &fcoe->ctlr;
	struct fc_frame_header *fh = fc_frame_header_get(fp);

	/* FLOGI/FDISC: hook the FIP controller in, except in p2mp mode */
	if ((op == ELS_FLOGI || op == ELS_FDISC) &&
	    !lport->point_to_multipoint)
		return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp,
				     fip, timeout);

	/* only hook onto fabric logouts, not port logouts */
	if (op == ELS_LOGO && ntoh24(fh->fh_d_id) == FC_FID_FLOGI)
		return fc_elsct_send(lport, did, fp, op, fcoe_logo_resp,
				     lport, timeout);

	return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
}
/**
 * fcoe_vport_create() - create an fc_host/scsi_host for a vport
 * @vport: fc_vport object to create a new fc_host for
 * @disabled: start the new fc_host in a disabled state by default?
 *
 * Returns: 0 for success, a negative errno otherwise
 */
static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
{
	struct Scsi_Host *shost = vport_to_shost(vport);
	struct fc_lport *n_port = shost_priv(shost);
	struct fcoe_port *port = lport_priv(n_port);
	struct fcoe_interface *fcoe = port->priv;
	struct net_device *netdev = fcoe->netdev;
	struct fc_lport *vn_port;
	int rc;
	char buf[32];

	/* reject duplicate WWPNs before allocating anything */
	rc = fcoe_validate_vport_create(vport);
	if (rc) {
		wwn_to_str(vport->port_name, buf, sizeof(buf));
		printk(KERN_ERR "fcoe: Failed to create vport, "
		       "WWPN (0x%s) already exists\n",
		       buf);
		return rc;
	}

	/* NPIV lport creation is serialized with other config changes */
	mutex_lock(&fcoe_config_mutex);
	vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
	mutex_unlock(&fcoe_config_mutex);

	if (IS_ERR(vn_port)) {
		printk(KERN_ERR "fcoe: fcoe_vport_create(%s) failed\n",
		       netdev->name);
		return -EIO;
	}

	if (disabled) {
		fc_vport_set_state(vport, FC_VPORT_DISABLED);
	} else {
		/* bring the new NPIV port online immediately */
		vn_port->boot_time = jiffies;
		fc_fabric_login(vn_port);
		fc_vport_setlink(vn_port);
	}
	return 0;
}
/**
 * fcoe_vport_destroy() - destroy the fc_host/scsi_host for a vport
 * @vport: fc_vport object that is being destroyed
 *
 * Unlinks the vport's lport from its parent N_Port and defers the
 * actual teardown to the fcoe workqueue.
 *
 * Returns: 0 for success
 */
static int fcoe_vport_destroy(struct fc_vport *vport)
{
	struct Scsi_Host *shost = vport_to_shost(vport);
	struct fc_lport *n_port = shost_priv(shost);
	struct fc_lport *vn_port = vport->dd_data;
	struct fcoe_port *port = lport_priv(vn_port);

	/* unlink from the parent's vport list under the lport mutex */
	mutex_lock(&n_port->lp_mutex);
	list_del(&vn_port->list);
	mutex_unlock(&n_port->lp_mutex);

	/* the heavyweight teardown runs asynchronously on fcoe_wq */
	queue_work(fcoe_wq, &port->destroy_work);
	return 0;
}
/**
 * fcoe_vport_disable() - change vport state
 * @vport: vport to bring online/offline
 * @disable: should the vport be disabled?
 *
 * Logs the vport out of (or back into) the fabric accordingly.
 *
 * Returns: 0 always
 */
static int fcoe_vport_disable(struct fc_vport *vport, bool disable)
{
	struct fc_lport *lport = vport->dd_data;

	if (!disable) {
		/* bring the vport back online */
		lport->boot_time = jiffies;
		fc_fabric_login(lport);
		fc_vport_setlink(lport);
	} else {
		fc_vport_set_state(vport, FC_VPORT_DISABLED);
		fc_fabric_logoff(lport);
	}

	return 0;
}
/**
 * fcoe_set_vport_symbolic_name() - append vport string to symbolic name
 * @vport: fc_vport with a new symbolic name string
 *
 * After generating a new symbolic name string, a new RSPN_ID request is
 * sent to the name server. There is no response handler, so if it fails
 * for some reason it will not be retried.
 */
static void fcoe_set_vport_symbolic_name(struct fc_vport *vport)
{
	struct fc_lport *lport = vport->dd_data;
	struct fc_frame *fp;
	size_t len;

	snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
		 "%s v%s over %s : %s", FCOE_NAME, FCOE_VERSION,
		 fcoe_netdev(lport)->name, vport->symbolic_name);

	/* only a READY (fabric-registered) port can talk to the dNS */
	if (lport->state != LPORT_ST_READY)
		return;

	/* NOTE(review): 255 appears to cap the RSPN symbolic-name payload;
	 * confirm it is consistent with FC_SYMBOLIC_NAME_SIZE. */
	len = strnlen(fc_host_symbolic_name(lport->host), 255);
	fp = fc_frame_alloc(lport,
			    sizeof(struct fc_ct_hdr) +
			    sizeof(struct fc_ns_rspn) + len);
	if (!fp)
		return;
	/* fire-and-forget RSPN_ID: no response handler is installed */
	lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID,
			     NULL, NULL, 3 * lport->r_a_tov);
}
/**
 * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
 * @lport: the local port
 * @fc_lesb: the link error status block
 *
 * Sums the per-CPU FCoE error counters into the FCoE-specific LESB and
 * adds the netdev's CRC error count.
 */
static void fcoe_get_lesb(struct fc_lport *lport,
			  struct fc_els_lesb *fc_lesb)
{
	struct fcoe_fc_els_lesb *lesb = (struct fcoe_fc_els_lesb *)fc_lesb;
	struct net_device *netdev = fcoe_netdev(lport);
	struct rtnl_link_stats64 temp;
	struct fcoe_dev_stats *stats;
	u32 link_fail = 0, vlink_fail = 0, miss_fka = 0;
	unsigned int cpu;

	memset(lesb, 0, sizeof(*lesb));

	/* aggregate the per-CPU counters */
	for_each_possible_cpu(cpu) {
		stats = per_cpu_ptr(lport->dev_stats, cpu);
		link_fail += stats->LinkFailureCount;
		vlink_fail += stats->VLinkFailureCount;
		miss_fka += stats->MissDiscAdvCount;
	}

	lesb->lesb_link_fail = htonl(link_fail);
	lesb->lesb_vlink_fail = htonl(vlink_fail);
	lesb->lesb_miss_fka = htonl(miss_fka);
	lesb->lesb_fcs_error =
		htonl(dev_get_stats(netdev, &temp)->rx_crc_errors);
}
/**
 * fcoe_set_port_id() - Callback from libfc when Port_ID is set.
 * @lport: the local port
 * @port_id: the port ID
 * @fp: the received frame, if any, that caused the port_id to be set.
 *
 * This routine handles the case where we received a FLOGI and are
 * entering point-to-point mode. We need to call fcoe_ctlr_recv_flogi()
 * so it can set the non-mapped mode and gateway address.
 *
 * The FLOGI LS_ACC is handled by fcoe_flogi_resp().
 */
static void fcoe_set_port_id(struct fc_lport *lport,
			     u32 port_id, struct fc_frame *fp)
{
	struct fcoe_port *port = lport_priv(lport);
	struct fcoe_interface *fcoe = port->priv;

	/* only received FLOGI frames are of interest here */
	if (!fp || fc_frame_payload_op(fp) != ELS_FLOGI)
		return;

	fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
}
/**
 * fcoe_validate_vport_create() - Validate a vport before creating it
 * @vport: NPIV port to be created
 *
 * This routine is meant to add validation for a vport before creating it
 * via fcoe_vport_create().
 * Current validations are:
 *      - WWPN supplied is unique for given lport
 *
 * Returns: 0 when the WWPN is acceptable, -EINVAL on a duplicate
 */
static int fcoe_validate_vport_create(struct fc_vport *vport)
{
	struct Scsi_Host *shost = vport_to_shost(vport);
	struct fc_lport *n_port = shost_priv(shost);
	struct fc_lport *vn_port;
	int rc = 0;
	char buf[32];

	mutex_lock(&n_port->lp_mutex);

	wwn_to_str(vport->port_name, buf, sizeof(buf));

	/* Check if the wwpn is not same as that of the lport */
	if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
		FCOE_DBG("vport WWPN 0x%s is same as that of the "
			 "base port WWPN\n", buf);
		rc = -EINVAL;
	} else {
		/* Check if there is any existing vport with same wwpn */
		list_for_each_entry(vn_port, &n_port->vports, list) {
			if (!memcmp(&vn_port->wwpn, &vport->port_name,
				    sizeof(u64))) {
				FCOE_DBG("vport with given WWPN 0x%s already "
					 "exists\n", buf);
				rc = -EINVAL;
				break;
			}
		}
	}

	mutex_unlock(&n_port->lp_mutex);
	return rc;
}
| gpl-2.0 |
fledermaus/steamos_kernel | fs/ext4/indirect.c | 862 | 41979 | /*
* linux/fs/ext4/indirect.c
*
* from
*
* linux/fs/ext4/inode.c
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/inode.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* Goal-directed block allocation by Stephen Tweedie
* (sct@redhat.com), 1993, 1998
*/
#include <linux/aio.h>
#include "ext4_jbd2.h"
#include "truncate.h"
#include "ext4_extents.h" /* Needed for EXT_MAX_BLOCKS */
#include <trace/events/ext4.h>
/*
 * Cursor into the indirect-block tree: @p points at the slot that holds
 * the little-endian block number cached in @key, and @bh is the buffer
 * that slot lives in (NULL when the slot is in the inode's i_data).
 */
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;
/* Record one step of the chain: remember the slot, its value and its bh. */
static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->p = v;
	p->key = *v;
	p->bh = bh;
}
/**
 * ext4_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 * To store the locations of file's data ext4 uses a data structure common
 * for UNIX filesystems - tree of pointers anchored in the inode, with
 * data blocks at leaves and indirect blocks in intermediate nodes.
 * This function translates the block number into path in that tree -
 * return value is the path length and @offsets[n] is the offset of
 * pointer to (n+1)th node in the nth one. If @block is out of range
 * (negative or too large) warning is printed and zero returned.
 *
 * Note: function doesn't find node addresses, so no IO is needed. All
 * we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */
static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	/* number of block pointers that fit in one indirect block */
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	/*
	 * Each branch subtracts the capacity of the previous region from
	 * i_block, so the comparisons below are against region sizes,
	 * not absolute block numbers.
	 */
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		/* out of range: warn, leave n == 0 so caller sees failure */
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	/* distance (in blocks) to the end of the last-level map */
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
/**
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise. Upon the return chain[i].key contains
 * the number of (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into the bh->b_data
 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 * block for i>0 and NULL for i==0. In other words, it holds the block
 * numbers of the chain, addresses they were taken from (and where we can
 * verify that chain did not change) and buffer_heads hosting these
 * numbers.
 *
 * Function stops when it stumbles upon zero pointer (absent block)
 *	(pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 *	(ditto, *@err == -EIO)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all way to the data (returns %NULL, *err == 0).
 *
 * Need to be called with
 * down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;
	int ret = -EIO;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto failure;
		}

		if (!bh_uptodate_or_lock(bh)) {
			/* not cached: read it and sanity-check its contents */
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = ret;
no_block:
	return p;
}
/**
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when heuristic for sequential allocation fails.
 * Rules are:
 *	+ if there is a block to the left of our position - allocate near it.
 *	+ if pointer will live in indirect block - allocate near that block.
 *	+ if pointer will live in inode - allocate in the same
 *	  cylinder group.
 *
 * In the latter case we colour the starting block by the callers PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.   The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p = ind->p;

	/* Prefer the closest preceding allocated block in the same map. */
	while (--p >= start) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* Otherwise allocate near the hosting indirect block, if any. */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * The pointer lives in the inode itself: fall back to the inode's
	 * cylinder-group goal block.
	 */
	return ext4_inode_to_goal_block(inode);
}
/**
 * ext4_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block:  block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function find the preferred place for block allocation,
 * returns it.
 * Because this is only used for non-extent files, we limit the block nr
 * to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	/*
	 * XXX need to get goal block from mballoc's data structures
	 *
	 * Mask keeps the goal within the 32-bit physical range allowed
	 * for block-mapped files.
	 */
	return ext4_find_near(inode, partial) & EXT4_MAX_BLOCK_FILE_PHYS;
}
/**
 * ext4_blks_to_allocate - Look up the block map and count the number
 * of direct blocks need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks need for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary:  the offset in the indirect block
 *
 * return the total number of blocks to be allocate, including the
 * direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count;

	if (k > 0) {
		/*
		 * The [td]indirect path itself is still missing, so all
		 * data blocks on it are unallocated; we never cross the
		 * boundary in a single allocation.
		 */
		if (blks < blocks_to_boundary + 1)
			return blks;
		return blocks_to_boundary + 1;
	}

	/* Direct case: extend while the following map slots are empty. */
	count = 1;
	while (count < blks && count <= blocks_to_boundary &&
	       le32_to_cpu(*(branch[0].p + count)) == 0)
		count++;

	return count;
}
/**
 * ext4_alloc_branch - allocate and set up a chain of blocks.
 * @handle: handle for this transaction
 * @inode: owner
 * @iblock: logical block of the first data block being allocated
 * @indirect_blks: number of indirect blocks that must be allocated
 * @blks: in: number of data blocks requested; out: number allocated
 * @goal: preferred place for allocation
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates blocks, zeroes out all but the last one,
 * links them into chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode. It stores the information about that chain in the branch[], in
 * the same format as ext4_get_branch() would do. We are calling it after
 * we had read the existing part of chain and partial points to the last
 * triple of that (one with zero ->key). Upon the exit we have the same
 * picture as after the successful ext4_get_block(), except that in one
 * place chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value the from failed
 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 * as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	struct ext4_allocation_request ar;
	struct buffer_head *		bh;
	ext4_fsblk_t			b, new_blocks[4];
	__le32				*p;
	int				i, j, err, len = 1;

	/*
	 * Set up for the direct block allocation
	 */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.len = *blks;
	ar.logical = iblock;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;

	/*
	 * Allocate indirect blocks one per iteration; the final
	 * iteration (i == indirect_blks) allocates the data blocks.
	 */
	for (i = 0; i <= indirect_blks; i++) {
		if (i == indirect_blks) {
			ar.goal = goal;
			new_blocks[i] = ext4_mb_new_blocks(handle, &ar, &err);
		} else
			/* chain metadata allocations toward the goal */
			goal = new_blocks[i] = ext4_new_meta_blocks(handle, inode,
							goal, 0, NULL, &err);
		if (err) {
			/* new_blocks[i] was not allocated; roll back i-1..0 */
			i--;
			goto failed;
		}
		branch[i].key = cpu_to_le32(new_blocks[i]);
		if (i == 0)
			continue;

		/*
		 * Initialize the indirect block allocated in the previous
		 * iteration: zero it and plant the pointers to the next
		 * level (or the run of data blocks for the last level).
		 */
		bh = branch[i].bh = sb_getblk(inode->i_sb, new_blocks[i-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, bh->b_size);
		p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
		b = new_blocks[i];

		/* last level references ar.len consecutive data blocks */
		if (i == indirect_blks)
			len = ar.len;
		for (j = 0; j < len; j++)
			*p++ = cpu_to_le32(b++);

		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto failed;
	}
	*blks = ar.len;
	return 0;
failed:
	for (; i >= 0; i--) {
		/*
		 * We want to ext4_forget() only freshly allocated indirect
		 * blocks. Buffer for new_blocks[i-1] is at branch[i].bh and
		 * buffer at branch[0].bh is indirect block / inode already
		 * existing before ext4_alloc_branch() was called.
		 */
		if (i > 0 && i != indirect_blks && branch[i].bh)
			ext4_forget(handle, 1, inode, branch[i].bh,
				    branch[i].bh->b_blocknr);
		ext4_free_blocks(handle, inode, NULL, new_blocks[i],
				 (i == indirect_blks) ? ar.len : 1, 0);
	}
	return err;
}
/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @handle: handle for this transaction
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext4_alloc_branch)
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 * @blks:  number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where, int num,
			      int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	/* the single store below makes the whole new branch visible */
	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to more just allocated
	 * direct blocks blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	/* undo: release the freshly allocated branch again */
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
			 blks, 0);

	return err;
}
/*
 * The ext4_ind_map_blocks() function handles non-extents inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_map_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_get_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			int flags)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/*map more blocks*/
		/* extend the mapping while blocks are physically contiguous */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
				 "non-extent mapped inodes with bigalloc");
		return -ENOSPC;
	}

	goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* the number of blocks need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
				      map->m_len, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
				&count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case.  --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, map->m_lblk,
					 partial, indirect_blks, count);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	/* release every indirect-block buffer pinned by ext4_get_branch() */
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, map, err);
	return err;
}
/*
 * O_DIRECT for ext3 (or indirect map) based files
 *
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list.  So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 * If the O_DIRECT write is instantiating holes inside i_size and the machine
 * crashes then stale disk data _may_ be exposed inside the file. But current
 * VFS code falls back into buffered path in that case so we are safe.
 */
ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
			   const struct iovec *iov, loff_t offset,
			   unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext4_inode_info *ei = EXT4_I(inode);
	handle_t *handle;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_length(iov, nr_segs);
	int retries = 0;

	if (rw == WRITE) {
		loff_t final_size = offset + count;

		if (final_size > inode->i_size) {
			/* Credits for sb + inode write */
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				goto out;
			}
			/*
			 * Extending write: orphan the inode so a crash
			 * mid-write truncates it back to i_disksize.
			 */
			ret = ext4_orphan_add(handle, inode);
			if (ret) {
				ext4_journal_stop(handle);
				goto out;
			}
			orphan = 1;
			ei->i_disksize = inode->i_size;
			ext4_journal_stop(handle);
		}
	}

retry:
	if (rw == READ && ext4_should_dioread_nolock(inode)) {
		/* flush pending unwritten-extent io before a nolock read */
		if (unlikely(atomic_read(&EXT4_I(inode)->i_unwritten))) {
			mutex_lock(&inode->i_mutex);
			ext4_flush_unwritten_io(inode);
			mutex_unlock(&inode->i_mutex);
		}
		/*
		 * Nolock dioread optimization may be dynamically disabled
		 * via ext4_inode_block_unlocked_dio(). Check inode's state
		 * while holding extra i_dio_count ref.
		 */
		atomic_inc(&inode->i_dio_count);
		smp_mb();
		if (unlikely(ext4_test_inode_state(inode,
						    EXT4_STATE_DIOREAD_LOCK))) {
			inode_dio_done(inode);
			goto locked;
		}
		ret = __blockdev_direct_IO(rw, iocb, inode,
				 inode->i_sb->s_bdev, iov,
				 offset, nr_segs,
				 ext4_get_block, NULL, NULL, 0);
		inode_dio_done(inode);
	} else {
locked:
		ret = blockdev_direct_IO(rw, iocb, inode, iov,
				 offset, nr_segs, ext4_get_block);

		/* a failed extending write must not leave blocks past EOF */
		if (unlikely((rw & WRITE) && ret < 0)) {
			loff_t isize = i_size_read(inode);
			loff_t end = offset + iov_length(iov, nr_segs);

			if (end > isize)
				ext4_truncate_failed_write(inode);
		}
	}
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

	if (orphan) {
		int err;

		/* Credits for sb + inode write */
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			/* This is really bad luck. We've written the data
			 * but cannot extend i_size. Bail out and pretend
			 * the write failed... */
			ret = PTR_ERR(handle);
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);

			goto out;
		}
		if (inode->i_nlink)
			ext4_orphan_del(handle, inode);
		if (ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * We're going to return a positive `ret'
				 * here due to non-zero-length I/O, so there's
				 * no way of reporting error returns from
				 * ext4_mark_inode_dirty() to userspace.  So
				 * ignore it.
				 */
				ext4_mark_inode_dirty(handle, inode);
			}
		}
		err = ext4_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}
/*
 * Calculate the number of metadata blocks need to reserve
 * to allocate a new block at @lblock for non extent file based file
 */
int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	/* mask selecting the indirect-block region of a logical block */
	sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
	int blk_bits;

	/* direct blocks need no metadata */
	if (lblock < EXT4_NDIR_BLOCKS)
		return 0;

	lblock -= EXT4_NDIR_BLOCKS;

	/*
	 * Same region as the last block we accounted for: metadata is
	 * already reserved, just extend the cached run length.
	 */
	if (ei->i_da_metadata_calc_len &&
	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
		ei->i_da_metadata_calc_len++;
		return 0;
	}
	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
	ei->i_da_metadata_calc_len = 1;

	/* one metadata block per tree level reachable from this offset */
	blk_bits = order_base_2(lblock);
	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}
/*
 * Estimate the number of indirect-tree blocks touched when mapping
 * @nrblocks blocks; @chunk is non-zero when the blocks are contiguous.
 */
int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	if (!chunk) {
		/*
		 * Scattered worst case: every block touches its own
		 * indirect block and a double indirect block, plus one
		 * triple indirect block.
		 */
		return nrblocks * 2 + 1;
	}

	/*
	 * With N contiguous data blocks, we need at most
	 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
	 * 2 dindirect blocks, and 1 tindirect block
	 */
	return DIV_ROUND_UP(nrblocks,
			    EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}
/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * Try to extend this transaction for the purposes of truncation.  If
 * extend fails, we restart the transaction in the top-level truncate loop.
 *
 * Returns 0 if the handle has (or now has) enough room, 1 if the caller
 * must restart the transaction.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;

	/* non-zero from ext4_journal_extend() means we could not extend */
	return ext4_journal_extend(handle,
				   ext4_blocks_for_truncate(inode)) ? 1 : 0;
}
/*
* Probably it should be a library function... search for first non-zero word
* or memcmp with zero_page, whatever is better for particular architecture.
* Linus?
*/
/* Return 1 iff every 32-bit word in the half-open range [p, q) is zero. */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	__le32 *cursor;

	for (cursor = p; cursor < q; cursor++)
		if (*cursor)
			return 0;
	return 1;
}
/**
* ext4_find_shared - find the indirect blocks for partial truncation.
* @inode: inode in question
* @depth: depth of the affected branch
* @offsets: offsets of pointers in that branch (see ext4_block_to_path)
* @chain: place to store the pointers to partial indirect blocks
* @top: place to the (detached) top of branch
*
* This is a helper function used by ext4_truncate().
*
* When we do truncate() we may have to clean the ends of several
* indirect blocks but leave the blocks themselves alive. Block is
* partially truncated if some data below the new i_size is referred
* from it (and it is on the path to the first completely truncated
* data block, indeed). We have to free the top of that path along
* with everything to the right of the path. Since no allocation
* past the truncation point is possible until ext4_truncate()
* finishes, we may safely do the latter, but top of branch may
* require special attention - pageout below the truncation point
* might try to populate it.
*
* We atomically detach the top of branch from the tree, store the
* block number of its root in *@top, pointers to buffer_heads of
* partially truncated blocks - in @chain[].bh and pointers to
* their last elements that should not be removed - in
* @chain[].p. Return value is the pointer to last filled element
* of @chain.
*
* The work left to caller to do the actual freeing of subtrees:
* a) free the subtree starting from *@top
* b) free the subtrees whose roots are stored in
* (@chain[i].p+1 .. end of @chain[i].bh->b_data)
* c) free the subtrees growing from the inode past the @chain[0].
* (no partially truncated stuff there). */
static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	/*
	 * Walk back up the chain past indirect blocks whose pointers before
	 * partial->p are all zero - those blocks hold nothing that survives.
	 */
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4.  Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	/* Release the buffers above the surviving level; caller keeps the rest. */
	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
/*
* Zero a number of block pointers in either an inode or an indirect block.
* If we restart the transaction we must again get write access to the
* indirect block for further modification.
*
* We release `count' blocks on disk, but (last - first) may be greater
* than `count' because there can be holes in there.
*
* Return 0 on success, 1 on invalid block range
* and < 0 on fatal error.
*/
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int	flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
	int	err;

	/* Directory and symlink data blocks are metadata for journaling. */
	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA;

	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
				   count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	/*
	 * Running low on journal credits: checkpoint by flushing dirty
	 * metadata, restarting the transaction, and re-taking write access
	 * to @bh (a restart drops the access we held).
	 */
	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			err = ext4_handle_dirty_metadata(handle, inode, bh);
			if (unlikely(err))
				goto out_err;
		}
		err = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err))
			goto out_err;
		err = ext4_truncate_restart_trans(handle, inode,
					ext4_blocks_for_truncate(inode));
		if (unlikely(err))
			goto out_err;
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			err = ext4_journal_get_write_access(handle, bh);
			if (unlikely(err))
				goto out_err;
		}
	}

	/* Zero the whole pointer range; holes in it were already zero. */
	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}
/**
* ext4_free_data - free a list of data blocks
* @handle: handle for this transaction
* @inode: inode we are dealing with
* @this_bh: indirect buffer_head which contains *@first and *@last
* @first: array of block numbers
* @last: points immediately past the end of array
*
* We are freeing all blocks referred from that array (numbers are stored as
* little-endian 32-bit) and updating @inode->i_blocks appropriately.
*
* We accumulate contiguous runs of blocks to free. Conveniently, if these
* blocks are contiguous then releasing them at one time will only affect one
* or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
* actually use a lot of journal space.
*
* @this_bh will be %NULL if @first and @last point into the inode's direct
* block pointers.
*/
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
	unsigned long count = 0;	   /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;	   /* Pointer into inode/ind
					      corresponding to
					      block_to_free */
	ext4_fsblk_t nr;		   /* Current block # */
	__le32 *p;			   /* Pointer into inode/ind
					      for current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				/* Run broken: release what we gathered so far. */
				err = ext4_clear_blocks(handle, inode, this_bh,
							block_to_free, count,
							block_to_free_p, p);
				if (err)
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	/* Flush the final accumulated run, if any. */
	if (!err && count > 0)
		err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
					count, block_to_free_p, p);
	if (err < 0)
		/* fatal error */
		return;
	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point. However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block was cleared. Check for this instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			EXT4_ERROR_INODE(inode,
					 "circular indirect block detected at "
					 "block %llu",
				(unsigned long long) this_bh->b_blocknr);
	}
}
/**
* ext4_free_branches - free an array of branches
* @handle: JBD handle for this transaction
* @inode: inode we are dealing with
* @parent_bh: the buffer_head which contains *@first and *@last
* @first: array of block numbers
* @last: pointer immediately past the end of array
* @depth: depth of the branches to free
*
* We are freeing all blocks referred from these branches (numbers are
* stored as little-endian 32-bit) and updating @inode->i_blocks
* appropriately.
*/
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		/* Walk pointers right-to-left so subtrees go before parents. */
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						   nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
						 (unsigned long) nr, depth);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				EXT4_ERROR_INODE_BLOCK(inode, nr,
						       "Read failure");
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					(__le32 *) bh->b_data,
					(__le32 *) bh->b_data + addr_per_block,
					depth);
			brelse(bh);

			/*
			 * Everything below this this pointer has been
			 * released.  Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it.  So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_truncate_restart_trans(handle, inode,
					    ext4_blocks_for_truncate(inode));
			}

			/*
			 * The forget flag here is critical because if
			 * we are journaling (and not doing data
			 * journaling), we have to make sure a revoke
			 * record is written to prevent the journal
			 * replay from overwriting the (former)
			 * indirect block if it gets reallocated as a
			 * data block.  This must happen in the same
			 * transaction where the data blocks are
			 * actually freed.
			 */
			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
					"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}
/*
 * Truncate an indirect-mapped inode down to i_size, freeing direct blocks,
 * partially-shared indirect branches, and whole remaining subtrees.
 */
void ext4_ind_truncate(handle_t *handle, struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	/* First block past the new EOF, and the indirect-scheme limit. */
	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			return;
	}

	ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode. We do this via i_disksize, which is the value which
	 * ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (last_block == max_block) {
		/*
		 * It is unnecessary to free any data blocks if last_block is
		 * equal to the indirect block limit.
		 */
		return;
	} else if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/*
	 * Kill the remaining (whole) subtrees.  Each case deliberately
	 * falls through to also free the deeper trees.
	 */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_TIND_BLOCK:
		;
	}
}
/*
 * Free the blocks backing a hole [first, first+count) within one level of
 * the indirect tree rooted at the @max pointers in @i_data.
 *
 * @level: 0 for direct pointers, 1..3 for (d/t)indirect pointers.
 * @first: first logical block of the hole, relative to this pointer array.
 * @count: number of logical blocks in the hole.
 * @max:   number of pointer slots in @i_data.
 *
 * Returns 0 on success or a negative errno on read failure.
 */
static int free_hole_blocks(handle_t *handle, struct inode *inode,
			    struct buffer_head *parent_bh, __le32 *i_data,
			    int level, ext4_lblk_t first,
			    ext4_lblk_t count, int max)
{
	struct buffer_head *bh = NULL;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ret = 0;
	int i, inc;
	ext4_lblk_t offset;
	__le32 blk;

	/* Number of data blocks each slot at this level maps. */
	inc = 1 << ((EXT4_BLOCK_SIZE_BITS(inode->i_sb) - 2) * level);
	for (i = 0, offset = 0; i < max; i++, i_data++, offset += inc) {
		if (offset >= count + first)
			break;
		/* Skip unmapped slots and slots entirely before the hole. */
		if (*i_data == 0 || (offset + inc) <= first)
			continue;
		blk = *i_data;
		if (level > 0) {
			ext4_lblk_t first2;
			ext4_lblk_t count2;

			bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
			if (!bh) {
				EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
						       "Read failure");
				return -EIO;
			}
			/*
			 * Translate the hole into this child's coordinate
			 * space.  NOTE(review): when first > offset, count2
			 * is left as the full remaining count even though
			 * the child only spans inc blocks - presumably the
			 * break/continue checks above bound it; verify
			 * against the upstream ext4_ind_remove_space rework.
			 */
			if (first > offset) {
				first2 = first - offset;
				count2 = count;
			} else {
				first2 = 0;
				count2 = count - (offset - first);
			}
			ret = free_hole_blocks(handle, inode, bh,
					       (__le32 *)bh->b_data, level - 1,
					       first2, count2,
					       inode->i_sb->s_blocksize >> 2);
			if (ret) {
				brelse(bh);
				goto err;
			}
		}
		/* Free the slot itself once its subtree is fully empty. */
		if (level == 0 ||
		    (bh && all_zeroes((__le32 *)bh->b_data,
				      (__le32 *)bh->b_data + addr_per_block))) {
			ext4_free_data(handle, inode, parent_bh, &blk, &blk+1);
			*i_data = 0;
		}
		brelse(bh);
		bh = NULL;
	}

err:
	return ret;
}
/*
 * Punch a hole covering logical blocks [first, stop) in an indirect-mapped
 * inode by freeing each level (direct, ind, dind, tind) in turn.
 * Returns 0 on success or a negative errno.
 */
int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
			  ext4_lblk_t first, ext4_lblk_t stop)
{
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int level, ret = 0;
	int num = EXT4_NDIR_BLOCKS;
	ext4_lblk_t count, max = EXT4_NDIR_BLOCKS;
	__le32 *i_data = EXT4_I(inode)->i_data;

	count = stop - first;
	/* max grows to the cumulative capacity of each successive level. */
	for (level = 0; level < 4; level++, max *= addr_per_block) {
		if (first < max) {
			ret = free_hole_blocks(handle, inode, NULL, i_data,
					       level, first, count, num);
			if (ret)
				goto err;
			if (count > max - first)
				count -= max - first;
			else
				break;
			/* Remainder of the hole starts at the next level. */
			first = 0;
		} else {
			/* Hole starts beyond this level; rebase and move on. */
			first -= max;
		}
		i_data += num;
		if (level == 0) {
			/* Beyond the direct blocks there is one root slot each. */
			num = 1;
			max = 1;
		}
	}

err:
	return ret;
}
| gpl-2.0 |
MoKee/android_kernel_samsung_klte | drivers/staging/iio/imu/inv_mpu/inv_mpu_trigger.c | 1118 | 1947 | /*
* Copyright (C) 2012 Invensense, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/poll.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include "iio.h"
#include "sysfs.h"
#include "trigger.h"
#include "inv_mpu_iio.h"
/*
* inv_mpu_data_rdy_trigger_set_state() set data ready interrupt state
*/
/*
 * inv_mpu_data_rdy_trigger_set_state() set data ready interrupt state.
 * Intentionally a no-op for this driver; always reports success.
 */
static int inv_mpu_data_rdy_trigger_set_state(struct iio_trigger *trig,
					      bool state)
{
	return 0;
}
/* Trigger callbacks registered with the IIO core. */
static const struct iio_trigger_ops inv_mpu_trigger_ops = {
	.owner = THIS_MODULE,
	.set_trigger_state = &inv_mpu_data_rdy_trigger_set_state,
};
/*
 * Allocate and register the data-ready trigger for @indio_dev.
 * Returns 0 on success, -ENOMEM if allocation fails, or -EPERM if
 * registration with the IIO core fails.
 */
int inv_mpu_probe_trigger(struct iio_dev *indio_dev)
{
	struct inv_mpu_state *st = iio_priv(indio_dev);
	int err;

	st->trig = iio_allocate_trigger("%s-dev%d",
					indio_dev->name,
					indio_dev->id);
	if (st->trig == NULL)
		return -ENOMEM;

	/* Wire the trigger to its parent device and callbacks. */
	st->trig->ops = &inv_mpu_trigger_ops;
	st->trig->dev.parent = &st->client->dev;
	st->trig->private_data = indio_dev;

	err = iio_trigger_register(st->trig);
	if (err) {
		iio_free_trigger(st->trig);
		return -EPERM;
	}
	indio_dev->trig = st->trig;

	return 0;
}
/* Unregister and free the trigger created by inv_mpu_probe_trigger(). */
void inv_mpu_remove_trigger(struct iio_dev *indio_dev)
{
	struct inv_mpu_state *st = iio_priv(indio_dev);
	struct iio_trigger *trig = st->trig;

	iio_trigger_unregister(trig);
	iio_free_trigger(trig);
}
| gpl-2.0 |
NoelMacwan/SXDNickiKernels | drivers/staging/qcache/tmem.c | 1374 | 22643 | /*
* In-kernel transcendent memory (generic implementation)
*
* Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
* Copyright (c) 2011, The Linux Foundation. All rights reserved.
*
* The primary purpose of Transcedent Memory ("tmem") is to map object-oriented
* "handles" (triples containing a pool id, and object id, and an index), to
* pages in a page-accessible memory (PAM). Tmem references the PAM pages via
* an abstract "pampd" (PAM page-descriptor), which can be operated on by a
* set of functions (pamops). Each pampd contains some representation of
* PAGE_SIZE bytes worth of data. Tmem must support potentially millions of
* pages and must be able to insert, find, and delete these pages at a
* potential frequency of thousands per second concurrently across many CPUs,
* (and, if used with KVM, across many vcpus across many guests).
* Tmem is tracked with a hierarchy of data structures, organized by
* the elements in a handle-tuple: pool_id, object_id, and page index.
* One or more "clients" (e.g. guests) each provide one or more tmem_pools.
* Each pool, contains a hash table of rb_trees of tmem_objs. Each
* tmem_obj contains a radix-tree-like tree of pointers, with intermediate
* nodes called tmem_objnodes. Each leaf pointer in this tree points to
* a pampd, which is accessible only through a small set of callbacks
* registered by the PAM implementation (see tmem_register_pamops). Tmem
* does all memory allocation via a set of callbacks registered by the tmem
* host implementation (e.g. see tmem_register_hostops).
*/
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include "tmem.h"
/* data structure sentinels used for debugging... see tmem.h */
#define POOL_SENTINEL 0x87658765
#define OBJ_SENTINEL 0x12345678
#define OBJNODE_SENTINEL 0xfedcba09
static bool tmem_enabled;
/* Serialize tmem enable/disable against in-flight tmem operations. */
static void lock_tmem_state(void)
{
	lock_fmem_state();
}

static void unlock_tmem_state(void)
{
	unlock_fmem_state();
}
/*
* A tmem host implementation must use this function to register callbacks
* for memory allocation.
*/
static struct tmem_hostops tmem_hostops;
static void tmem_objnode_tree_init(void);
/*
 * Register the host's allocation callbacks.  Also initializes the objnode
 * tree height table, so this must be called before any tmem operation.
 */
void tmem_register_hostops(struct tmem_hostops *m)
{
	tmem_objnode_tree_init();
	tmem_hostops = *m;	/* copied by value; caller's struct may go away */
}
/*
* A tmem host implementation must use this function to register
* callbacks for a page-accessible memory (PAM) implementation
*/
static struct tmem_pamops tmem_pamops;
/* Register the PAM implementation's page-descriptor callbacks (by value). */
void tmem_register_pamops(struct tmem_pamops *m)
{
	tmem_pamops = *m;
}
/*
* Oid's are potentially very sparse and tmem_objs may have an indeterminately
* short life, being added and deleted at a relatively high frequency.
* So an rb_tree is an ideal data structure to manage tmem_objs. But because
* of the potentially huge number of tmem_objs, each pool manages a hashtable
* of rb_trees to reduce search, insert, delete, and rebalancing time.
* Each hashbucket also has a lock to manage concurrent access.
*
* The following routines manage tmem_objs. When any tmem_obj is accessed,
* the hashbucket lock must be held.
*/
/* searches for object==oid in pool, returns locked object if found */
/* searches for object==oid in pool, returns locked object if found */
static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
					struct tmem_oid *oidp)
{
	struct rb_node *node = hb->obj_rb_root.rb_node;

	/* Standard rb-tree descent keyed by oid comparison. */
	while (node) {
		struct tmem_obj *candidate;

		BUG_ON(RB_EMPTY_NODE(node));
		candidate = rb_entry(node, struct tmem_obj, rb_tree_node);
		switch (tmem_oid_compare(oidp, &candidate->oid)) {
		case 0: /* equal */
			return candidate;
		case -1:
			node = node->rb_left;
			break;
		case 1:
			node = node->rb_right;
			break;
		}
	}
	return NULL;
}
static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *);
/* free an object that has no more pampds in it */
/* free an object that has no more pampds in it */
static void tmem_obj_free(struct tmem_obj *obj, struct tmem_hashbucket *hb)
{
	struct tmem_pool *pool;

	BUG_ON(obj == NULL);
	ASSERT_SENTINEL(obj, OBJ);
	BUG_ON(obj->pampd_count > 0);
	pool = obj->pool;
	BUG_ON(pool == NULL);
	if (obj->objnode_tree_root != NULL) /* may be "stump" with no leaves */
		tmem_pampd_destroy_all_in_obj(obj);
	BUG_ON(obj->objnode_tree_root != NULL);
	BUG_ON((long)obj->objnode_count != 0);
	atomic_dec(&pool->obj_count);
	BUG_ON(atomic_read(&pool->obj_count) < 0);
	/* Poison the sentinel so a stale pointer is caught by ASSERT_SENTINEL. */
	INVERT_SENTINEL(obj, OBJ);
	obj->pool = NULL;
	tmem_oid_set_invalid(&obj->oid);
	rb_erase(&obj->rb_tree_node, &hb->obj_rb_root);
}
/*
* initialize, and insert an tmem_object_root (called only if find failed)
*/
/*
 * initialize, and insert an tmem_object_root (called only if find failed)
 */
static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
					struct tmem_pool *pool,
					struct tmem_oid *oidp)
{
	struct rb_root *root = &hb->obj_rb_root;
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct tmem_obj *this;

	BUG_ON(pool == NULL);
	atomic_inc(&pool->obj_count);
	obj->objnode_tree_height = 0;
	obj->objnode_tree_root = NULL;
	obj->pool = pool;
	obj->oid = *oidp;
	obj->objnode_count = 0;
	obj->pampd_count = 0;
	(*tmem_pamops.new_obj)(obj);
	SET_SENTINEL(obj, OBJ);
	/* Standard rb-tree insert: descend to the correct leaf slot. */
	while (*new) {
		BUG_ON(RB_EMPTY_NODE(*new));
		this = rb_entry(*new, struct tmem_obj, rb_tree_node);
		parent = *new;
		switch (tmem_oid_compare(oidp, &this->oid)) {
		case 0:
			BUG(); /* already present; should never happen! */
			break;
		case -1:
			new = &(*new)->rb_left;
			break;
		case 1:
			new = &(*new)->rb_right;
			break;
		}
	}
	rb_link_node(&obj->rb_tree_node, parent, new);
	rb_insert_color(&obj->rb_tree_node, root);
}
/*
* Tmem is managed as a set of tmem_pools with certain attributes, such as
* "ephemeral" vs "persistent". These attributes apply to all tmem_objs
* and all pampds that belong to a tmem_pool. A tmem_pool is created
* or deleted relatively rarely (for example, when a filesystem is
* mounted or unmounted.
*/
/* flush all data from a pool and, optionally, free it */
/* flush all data from a pool and, optionally, free it */
static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
{
	struct rb_node *rbnode;
	struct tmem_obj *obj;
	struct tmem_hashbucket *hb = &pool->hashbucket[0];
	int i;

	BUG_ON(pool == NULL);
	for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
		spin_lock(&hb->lock);
		rbnode = rb_first(&hb->obj_rb_root);
		while (rbnode != NULL) {
			obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
			/* Advance before tmem_obj_free() erases this node. */
			rbnode = rb_next(rbnode);
			tmem_pampd_destroy_all_in_obj(obj);
			tmem_obj_free(obj, hb);
			(*tmem_hostops.obj_free)(obj, pool);
		}
		spin_unlock(&hb->lock);
	}
	if (destroy)
		list_del(&pool->pool_list);
}
/*
* A tmem_obj contains a radix-tree-like tree in which the intermediate
* nodes are called tmem_objnodes. (The kernel lib/radix-tree.c implementation
* is very specialized and tuned for specific uses and is not particularly
* suited for use from this code, though some code from the core algorithms has
* been reused, thus the copyright notices below). Each tmem_objnode contains
* a set of pointers which point to either a set of intermediate tmem_objnodes
* or a set of of pampds.
*
* Portions Copyright (C) 2001 Momchil Velikov
* Portions Copyright (C) 2001 Christoph Hellwig
* Portions Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
*/
/* One step on the root-to-leaf path recorded during a delete walk. */
struct tmem_objnode_tree_path {
	struct tmem_objnode *objnode;
	int offset;	/* slot index taken within this objnode */
};
/* objnode height_to_maxindex translation */
static unsigned long tmem_objnode_tree_h2max[OBJNODE_TREE_MAX_PATH + 1];
/* Precompute the maximum index representable by a tree of each height. */
static void tmem_objnode_tree_init(void)
{
	unsigned int ht, tmp;

	for (ht = 0; ht < ARRAY_SIZE(tmem_objnode_tree_h2max); ht++) {
		tmp = ht * OBJNODE_TREE_MAP_SHIFT;
		if (tmp >= OBJNODE_TREE_INDEX_BITS)
			tmem_objnode_tree_h2max[ht] = ~0UL;
		else
			tmem_objnode_tree_h2max[ht] =
			    (~0UL >> (OBJNODE_TREE_INDEX_BITS - tmp - 1)) >> 1;
	}
}
/* Allocate a zeroed intermediate objnode via the host callback; NULL on OOM. */
static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
{
	struct tmem_objnode *objnode;

	ASSERT_SENTINEL(obj, OBJ);
	BUG_ON(obj->pool == NULL);
	ASSERT_SENTINEL(obj->pool, POOL);
	objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
	if (unlikely(objnode == NULL))
		goto out;
	objnode->obj = obj;
	SET_SENTINEL(objnode, OBJNODE);
	memset(&objnode->slots, 0, sizeof(objnode->slots));
	objnode->slots_in_use = 0;
	obj->objnode_count++;
out:
	return objnode;
}
/* Return an empty objnode to the host; all slots must already be NULL. */
static void tmem_objnode_free(struct tmem_objnode *objnode)
{
	struct tmem_pool *pool;
	int i;

	BUG_ON(objnode == NULL);
	for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++)
		BUG_ON(objnode->slots[i] != NULL);
	ASSERT_SENTINEL(objnode, OBJNODE);
	/* Poison the sentinel to catch use-after-free. */
	INVERT_SENTINEL(objnode, OBJNODE);
	BUG_ON(objnode->obj == NULL);
	ASSERT_SENTINEL(objnode->obj, OBJ);
	pool = objnode->obj->pool;
	BUG_ON(pool == NULL);
	ASSERT_SENTINEL(pool, POOL);
	objnode->obj->objnode_count--;
	objnode->obj = NULL;
	(*tmem_hostops.objnode_free)(objnode, pool);
}
/*
* lookup index in object and return associated pampd (or NULL if not found)
*/
/*
 * lookup index in object and return associated pampd (or NULL if not found)
 *
 * Returns the address of the slot holding the pampd, so callers may also
 * replace the slot contents in place; NULL if the index is not present.
 */
static void **__tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
{
	unsigned int height, shift;
	struct tmem_objnode **slot = NULL;

	BUG_ON(obj == NULL);
	ASSERT_SENTINEL(obj, OBJ);
	BUG_ON(obj->pool == NULL);
	ASSERT_SENTINEL(obj->pool, POOL);

	height = obj->objnode_tree_height;
	/* Index beyond what this tree height can address: not present. */
	if (index > tmem_objnode_tree_h2max[obj->objnode_tree_height])
		goto out;
	/* Height 0: the root pointer itself is the single leaf slot. */
	if (height == 0 && obj->objnode_tree_root) {
		slot = &obj->objnode_tree_root;
		goto out;
	}
	shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
	slot = &obj->objnode_tree_root;
	/* Descend one level per iteration, indexing by successive bit groups. */
	while (height > 0) {
		if (*slot == NULL)
			goto out;
		slot = (struct tmem_objnode **)
			((*slot)->slots +
			 ((index >> shift) & OBJNODE_TREE_MAP_MASK));
		shift -= OBJNODE_TREE_MAP_SHIFT;
		height--;
	}
out:
	return slot != NULL ? (void **)slot : NULL;
}
/* Return the pampd stored at @index in @obj, or NULL if not present. */
static void *tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
{
	void **slotp = __tmem_pampd_lookup_in_obj(obj, index);

	if (slotp == NULL)
		return NULL;
	return *slotp;
}
/*
 * Replace the pampd at @index with @new_pampd, freeing the old one.
 * Returns @new_pampd on success, NULL if @index holds no pampd.
 */
static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
					void *new_pampd)
{
	struct tmem_objnode **slot;
	void *ret = NULL;

	slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
	if ((slot != NULL) && (*slot != NULL)) {
		void *old_pampd = *(void **)slot;
		*(void **)slot = new_pampd;
		(*tmem_pamops.free)(old_pampd, obj->pool, NULL, 0);
		ret = new_pampd;
	}
	return ret;
}
/*
 * Insert @pampd at @index in @obj's objnode tree, growing the tree higher
 * and allocating intermediate objnodes as needed.
 * Returns 0 on success or -ENOMEM (possibly leaving a partially built
 * "stump" the caller must clean up via delete).
 */
static int tmem_pampd_add_to_obj(struct tmem_obj *obj, uint32_t index,
				 void *pampd)
{
	int ret = 0;
	struct tmem_objnode *objnode = NULL, *newnode, *slot;
	unsigned int height, shift;
	int offset = 0;

	/* if necessary, extend the tree to be higher  */
	if (index > tmem_objnode_tree_h2max[obj->objnode_tree_height]) {
		height = obj->objnode_tree_height + 1;
		if (index > tmem_objnode_tree_h2max[height])
			while (index > tmem_objnode_tree_h2max[height])
				height++;
		if (obj->objnode_tree_root == NULL) {
			/* Empty tree: just record the height and insert. */
			obj->objnode_tree_height = height;
			goto insert;
		}
		/* Push new root nodes on top until tall enough. */
		do {
			newnode = tmem_objnode_alloc(obj);
			if (!newnode) {
				ret = -ENOMEM;
				goto out;
			}
			newnode->slots[0] = obj->objnode_tree_root;
			newnode->slots_in_use = 1;
			obj->objnode_tree_root = newnode;
			obj->objnode_tree_height++;
		} while (height > obj->objnode_tree_height);
	}
insert:
	slot = obj->objnode_tree_root;
	height = obj->objnode_tree_height;
	shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
	while (height > 0) {
		if (slot == NULL) {
			/* add a child objnode.  */
			slot = tmem_objnode_alloc(obj);
			if (!slot) {
				ret = -ENOMEM;
				goto out;
			}
			if (objnode) {
				objnode->slots[offset] = slot;
				objnode->slots_in_use++;
			} else
				obj->objnode_tree_root = slot;
		}
		/* go down a level */
		offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
		objnode = slot;
		slot = objnode->slots[offset];
		shift -= OBJNODE_TREE_MAP_SHIFT;
		height--;
	}
	BUG_ON(slot != NULL);	/* duplicate insert would clobber a pampd */
	if (objnode) {
		objnode->slots_in_use++;
		objnode->slots[offset] = pampd;
	} else
		obj->objnode_tree_root = pampd;	/* height-0 tree: root is leaf */
	obj->pampd_count++;
out:
	return ret;
}
/*
 * Remove and return the pampd at @index (NULL if absent), freeing any
 * objnodes emptied by the removal and shrinking the tree height when the
 * root chain collapses to single-slot nodes.
 */
static void *tmem_pampd_delete_from_obj(struct tmem_obj *obj, uint32_t index)
{
	struct tmem_objnode_tree_path path[OBJNODE_TREE_MAX_PATH + 1];
	struct tmem_objnode_tree_path *pathp = path;
	struct tmem_objnode *slot = NULL;
	unsigned int height, shift;
	int offset;

	BUG_ON(obj == NULL);
	ASSERT_SENTINEL(obj, OBJ);
	BUG_ON(obj->pool == NULL);
	ASSERT_SENTINEL(obj->pool, POOL);
	height = obj->objnode_tree_height;
	if (index > tmem_objnode_tree_h2max[height])
		goto out;
	slot = obj->objnode_tree_root;
	/* Height 0: the root itself is the leaf; detach it directly. */
	if (height == 0 && obj->objnode_tree_root) {
		obj->objnode_tree_root = NULL;
		goto out;
	}
	shift = (height - 1) * OBJNODE_TREE_MAP_SHIFT;
	pathp->objnode = NULL;
	/* Descend to the leaf, recording the path for cleanup afterwards. */
	do {
		if (slot == NULL)
			goto out;
		pathp++;
		offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
		pathp->offset = offset;
		pathp->objnode = slot;
		slot = slot->slots[offset];
		shift -= OBJNODE_TREE_MAP_SHIFT;
		height--;
	} while (height > 0);
	if (slot == NULL)
		goto out;
	/* Walk back up, clearing slots and freeing emptied objnodes. */
	while (pathp->objnode) {
		pathp->objnode->slots[pathp->offset] = NULL;
		pathp->objnode->slots_in_use--;
		if (pathp->objnode->slots_in_use) {
			if (pathp->objnode == obj->objnode_tree_root) {
				/* Collapse single-child root chains to shrink height. */
				while (obj->objnode_tree_height > 0 &&
				  obj->objnode_tree_root->slots_in_use == 1 &&
				  obj->objnode_tree_root->slots[0]) {
					struct tmem_objnode *to_free =
						obj->objnode_tree_root;

					obj->objnode_tree_root =
							to_free->slots[0];
					obj->objnode_tree_height--;
					to_free->slots[0] = NULL;
					to_free->slots_in_use = 0;
					tmem_objnode_free(to_free);
				}
			}
			goto out;
		}
		tmem_objnode_free(pathp->objnode); /* 0 slots used, free it */
		pathp--;
	}
	obj->objnode_tree_height = 0;
	obj->objnode_tree_root = NULL;

out:
	if (slot != NULL)
		obj->pampd_count--;
	BUG_ON(obj->pampd_count < 0);
	return slot;
}
/* recursively walk the objnode_tree destroying pampds and objnodes */
/* recursively walk the objnode_tree destroying pampds and objnodes */
static void tmem_objnode_node_destroy(struct tmem_obj *obj,
				      struct tmem_objnode *objnode,
				      unsigned int ht)
{
	int i;

	if (ht == 0)
		return;
	for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++) {
		if (objnode->slots[i]) {
			if (ht == 1) {
				/* Height 1: slots hold pampds, not objnodes. */
				obj->pampd_count--;
				(*tmem_pamops.free)(objnode->slots[i],
						    obj->pool, NULL, 0);
				objnode->slots[i] = NULL;
				continue;
			}
			tmem_objnode_node_destroy(obj, objnode->slots[i], ht-1);
			tmem_objnode_free(objnode->slots[i]);
			objnode->slots[i] = NULL;
		}
	}
}
/* Destroy every pampd and objnode in @obj, leaving an empty tree. */
static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj)
{
	if (obj->objnode_tree_root == NULL)
		return;
	if (obj->objnode_tree_height == 0) {
		/* Height 0: the root pointer is itself the lone pampd. */
		obj->pampd_count--;
		(*tmem_pamops.free)(obj->objnode_tree_root, obj->pool, NULL, 0);
	} else {
		tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
					obj->objnode_tree_height);
		tmem_objnode_free(obj->objnode_tree_root);
		obj->objnode_tree_height = 0;
	}
	obj->objnode_tree_root = NULL;
	(*tmem_pamops.free_obj)(obj->pool, obj);
}
/*
* Tmem is operated on by a set of well-defined actions:
* "put", "get", "flush", "flush_object", "new pool" and "destroy pool".
* (The tmem ABI allows for subpages and exchanges but these operations
* are not included in this implementation.)
*
* These "tmem core" operations are implemented in the following functions.
*/
/*
* "Put" a page, e.g. copy a page from the kernel into newly allocated
* PAM space (if such space is available). Tmem_put is complicated by
* a corner case: What if a page with matching handle already exists in
* tmem? To guarantee coherency, one of two actions is necessary: Either
* the data for the page must be overwritten, or the page must be
* "flushed" so that the data is not accessible to a subsequent "get".
* Since these "duplicate puts" are relatively rare, this implementation
* always flushes for simplicity.
*/
int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
		char *data, size_t size, bool raw, bool ephemeral)
{
	struct tmem_obj *obj = NULL, *objfound = NULL, *objnew = NULL;
	void *pampd = NULL, *pampd_del = NULL;
	int ret = -ENOMEM;
	struct tmem_hashbucket *hb;

	lock_tmem_state();
	/* NOTE(review): returns -ENOMEM (initial ret) when tmem is disabled;
	 * confirm callers treat that as a soft failure */
	if (!tmem_enabled)
		goto disabled;
	hb = &pool->hashbucket[tmem_oid_hash(oidp)];
	spin_lock(&hb->lock);
	obj = objfound = tmem_obj_find(hb, oidp);
	if (obj != NULL) {
		pampd = tmem_pampd_lookup_in_obj(objfound, index);
		if (pampd != NULL) {
			/* if found, is a dup put, flush the old one */
			pampd_del = tmem_pampd_delete_from_obj(obj, index);
			BUG_ON(pampd_del != pampd);
			(*tmem_pamops.free)(pampd, pool, oidp, index);
			/* object emptied by the flush: treat it as new so
			 * the error path below will free it */
			if (obj->pampd_count == 0) {
				objnew = obj;
				objfound = NULL;
			}
			pampd = NULL;
		}
	} else {
		/* no object for this oid yet: allocate and initialize one */
		obj = objnew = (*tmem_hostops.obj_alloc)(pool);
		if (unlikely(obj == NULL)) {
			ret = -ENOMEM;
			goto out;
		}
		tmem_obj_init(obj, hb, pool, oidp);
	}
	BUG_ON(obj == NULL);
	/* exactly one of objfound/objnew must equal obj */
	BUG_ON(((objnew != obj) && (objfound != obj)) || (objnew == objfound));
	/* allocate PAM space and copy the page data into it */
	pampd = (*tmem_pamops.create)(data, size, raw, ephemeral,
					obj->pool, &obj->oid, index);
	if (unlikely(pampd == NULL))
		goto free;
	ret = tmem_pampd_add_to_obj(obj, index, pampd);
	if (unlikely(ret == -ENOMEM))
		/* may have partially built objnode tree ("stump") */
		goto delete_and_free;
	goto out;
delete_and_free:
	(void)tmem_pampd_delete_from_obj(obj, index);
free:
	if (pampd)
		(*tmem_pamops.free)(pampd, pool, NULL, 0);
	if (objnew) {
		tmem_obj_free(objnew, hb);
		(*tmem_hostops.obj_free)(objnew, pool);
	}
out:
	spin_unlock(&hb->lock);
disabled:
	unlock_tmem_state();
	return ret;
}
/*
* "Get" a page, e.g. if one can be found, copy the tmem page with the
* matching handle from PAM space to the kernel. By tmem definition,
* when a "get" is successful on an ephemeral page, the page is "flushed",
* and when a "get" is successful on a persistent page, the page is retained
* in tmem. Note that to preserve
* coherency, "get" can never be skipped if tmem contains the data.
* That is, if a get is done with a certain handle and fails, any
* subsequent "get" must also fail (unless of course there is a
* "put" done with the same handle).
*/
int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
		char *data, size_t *size, bool raw, int get_and_free)
{
	struct tmem_obj *obj;
	void *pampd;
	bool ephemeral = is_ephemeral(pool);
	int ret = -1;
	struct tmem_hashbucket *hb;
	/* get_and_free: 1 = always free after get; 0 = free only when the
	 * pool is ephemeral; anything else = never free */
	bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral);
	bool lock_held = false;

	lock_tmem_state();
	if (!tmem_enabled)
		goto disabled;
	hb = &pool->hashbucket[tmem_oid_hash(oidp)];
	spin_lock(&hb->lock);
	lock_held = true;
	obj = tmem_obj_find(hb, oidp);
	if (obj == NULL)
		goto out;
	/* a freeing get detaches the pampd from the object up front */
	if (free)
		pampd = tmem_pampd_delete_from_obj(obj, index);
	else
		pampd = tmem_pampd_lookup_in_obj(obj, index);
	if (pampd == NULL)
		goto out;
	if (free) {
		/* that was the last page: tear down the empty object */
		if (obj->pampd_count == 0) {
			tmem_obj_free(obj, hb);
			(*tmem_hostops.obj_free)(obj, pool);
			obj = NULL;
		}
	}
	/* remote pampds must be fetched without the bucket lock held */
	if (tmem_pamops.is_remote(pampd)) {
		lock_held = false;
		spin_unlock(&hb->lock);
	}
	if (free)
		ret = (*tmem_pamops.get_data_and_free)(
				data, size, raw, pampd, pool, oidp, index);
	else
		ret = (*tmem_pamops.get_data)(
				data, size, raw, pampd, pool, oidp, index);
	if (ret < 0)
		goto out;
	ret = 0;
out:
	if (lock_held)
		spin_unlock(&hb->lock);
disabled:
	unlock_tmem_state();
	return ret;
}
/*
* If a page in tmem matches the handle, "flush" this page from tmem such
* that any subsequent "get" does not succeed (unless, of course, there
* was another "put" with the same handle).
*/
int tmem_flush_page(struct tmem_pool *pool,
				struct tmem_oid *oidp, uint32_t index)
{
	struct tmem_obj *obj;
	void *pampd;
	int ret = -1;
	struct tmem_hashbucket *hb;

	hb = &pool->hashbucket[tmem_oid_hash(oidp)];
	spin_lock(&hb->lock);
	obj = tmem_obj_find(hb, oidp);
	if (obj == NULL)
		goto out;
	pampd = tmem_pampd_delete_from_obj(obj, index);
	if (pampd == NULL)
		goto out;
	(*tmem_pamops.free)(pampd, pool, oidp, index);
	/* drop the object once its last page has been flushed */
	if (obj->pampd_count == 0) {
		tmem_obj_free(obj, hb);
		(*tmem_hostops.obj_free)(obj, pool);
	}
	ret = 0;
out:
	/* returns 0 if the page was flushed, -1 if it was not present */
	spin_unlock(&hb->lock);
	return ret;
}
/*
* If a page in tmem matches the handle, replace the page so that any
* subsequent "get" gets the new page. Returns 0 if
* there was a page to replace, else returns -1.
*/
int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
			uint32_t index, void *new_pampd)
{
	struct tmem_obj *obj;
	int ret = -1;
	struct tmem_hashbucket *hb;

	lock_tmem_state();
	if (!tmem_enabled)
		goto disabled;
	hb = &pool->hashbucket[tmem_oid_hash(oidp)];
	spin_lock(&hb->lock);
	obj = tmem_obj_find(hb, oidp);
	if (obj == NULL)
		goto out;
	/*
	 * NOTE(review): the value forwarded to replace_in_obj is whatever
	 * tmem_pampd_replace_in_obj returns, not the caller's new_pampd --
	 * presumably it echoes the new pampd on success; confirm against
	 * the pamops implementation.
	 */
	new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd);
	ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
out:
	spin_unlock(&hb->lock);
disabled:
	unlock_tmem_state();
	return ret;
}
/*
* "Flush" all pages in tmem matching this oid.
*/
int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
{
	struct tmem_obj *obj;
	struct tmem_hashbucket *hb;
	int ret = -1;

	hb = &pool->hashbucket[tmem_oid_hash(oidp)];
	spin_lock(&hb->lock);
	obj = tmem_obj_find(hb, oidp);
	if (obj == NULL)
		goto out;
	/* free all pampds/objnodes, then the object itself */
	tmem_pampd_destroy_all_in_obj(obj);
	tmem_obj_free(obj, hb);
	(*tmem_hostops.obj_free)(obj, pool);
	ret = 0;
out:
	/* returns 0 if the object existed and was flushed, else -1 */
	spin_unlock(&hb->lock);
	return ret;
}
/*
* "Flush" all pages (and tmem_objs) from this tmem_pool and disable
* all subsequent access to this tmem_pool.
*/
/*
 * Flush every page and object from POOL and mark it destroyed.
 * Returns 0 on success, -1 when POOL is NULL.
 */
int tmem_destroy_pool(struct tmem_pool *pool)
{
	if (pool == NULL)
		return -1;
	tmem_pool_flush(pool, 1);	/* 1 == destroy after flushing */
	return 0;
}
/*
 * Flush every page and object from POOL but keep the pool usable.
 * Returns 0 on success, -1 when POOL is NULL.
 */
int tmem_flush_pool(struct tmem_pool *pool)
{
	if (pool == NULL)
		return -1;
	tmem_pool_flush(pool, 0);	/* 0 == flush only, do not destroy */
	return 0;
}
/* list of every pool registered via tmem_new_pool() */
static LIST_HEAD(tmem_global_pool_list);
/*
* Create a new tmem_pool with the provided flag and return
* a pool id provided by the tmem host implementation.
*/
void tmem_new_pool(struct tmem_pool *pool, uint32_t flags)
{
	int persistent = flags & TMEM_POOL_PERSIST;
	int shared = flags & TMEM_POOL_SHARED;
	int i;

	/* reset each hash bucket: empty object tree plus a fresh lock */
	for (i = 0; i < TMEM_HASH_BUCKETS; i++) {
		struct tmem_hashbucket *hb = &pool->hashbucket[i];

		hb->obj_rb_root = RB_ROOT;
		spin_lock_init(&hb->lock);
	}
	INIT_LIST_HEAD(&pool->pool_list);
	atomic_set(&pool->obj_count, 0);
	SET_SENTINEL(pool, POOL);
	list_add_tail(&pool->pool_list, &tmem_global_pool_list);
	pool->persistent = persistent;
	pool->shared = shared;
}
/* The following must be called with tmem state locked */
static void tmem_cleanup(void)
{
	/* ask the host to drop every cached object (tmem state lock held) */
	tmem_hostops.flush_all_obj();
}
/* Turn the tmem engine on and notify the host backend. */
void tmem_enable(void)
{
	pr_info("turning tmem on\n");
	tmem_enabled = true;
	tmem_hostops.control(false);	/* false == un-freeze the host */
}
/* Turn the tmem engine off, drop all cached state, notify the host. */
void tmem_disable(void)
{
	pr_info("turning tmem off\n");
	tmem_enabled = false;
	tmem_cleanup();
	tmem_hostops.control(true);	/* true == freeze the host */
}
| gpl-2.0 |
anshulsahni/linux | sound/isa/adlib.c | 1630 | 2906 | /*
* AdLib FM card driver.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/isa.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/opl3.h>
#define CRD_NAME "AdLib FM"
#define DEV_NAME "adlib"

MODULE_DESCRIPTION(CRD_NAME);
MODULE_AUTHOR("Rene Herman");
MODULE_LICENSE("GPL");

/* standard ALSA per-card module parameters (up to SNDRV_CARDS cards) */
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE;
static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;

module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for " CRD_NAME " soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for " CRD_NAME " soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable " CRD_NAME " soundcard.");
module_param_array(port, long, NULL, 0444);
MODULE_PARM_DESC(port, "Port # for " CRD_NAME " driver.");
/*
 * ISA match callback: accept card N only when it is enabled and has an
 * explicit port (there is no way to probe for an AdLib automatically).
 */
static int snd_adlib_match(struct device *dev, unsigned int n)
{
	if (enable[n]) {
		if (port[n] != SNDRV_AUTO_PORT)
			return 1;
		dev_err(dev, "please specify port\n");
	}
	return 0;
}
/* card->private_free hook: release the I/O region grabbed in probe */
static void snd_adlib_free(struct snd_card *card)
{
	release_and_free_resource(card->private_data);
}
/*
 * Probe card N: create the ALSA card, reserve the OPL I/O ports, set up
 * the OPL3 device plus its hwdep interface, and register the card.
 * Returns 0 on success or a negative errno.
 */
static int snd_adlib_probe(struct device *dev, unsigned int n)
{
	struct snd_card *card;
	struct snd_opl3 *opl3;
	int err;

	err = snd_card_new(dev, index[n], id[n], THIS_MODULE, 0, &card);
	if (err < 0) {
		dev_err(dev, "could not create card\n");
		return err;
	}

	/* reserve the 4 OPL ports; released via snd_adlib_free() */
	card->private_data = request_region(port[n], 4, CRD_NAME);
	if (!card->private_data) {
		dev_err(dev, "could not grab ports\n");
		err = -EBUSY;
		goto error;
	}
	card->private_free = snd_adlib_free;

	strcpy(card->driver, DEV_NAME);
	strcpy(card->shortname, CRD_NAME);
	sprintf(card->longname, CRD_NAME " at %#lx", port[n]);

	err = snd_opl3_create(card, port[n], port[n] + 2, OPL3_HW_AUTO, 1, &opl3);
	if (err < 0) {
		dev_err(dev, "could not create OPL\n");
		goto error;
	}

	err = snd_opl3_hwdep_new(opl3, 0, 0, NULL);
	if (err < 0) {
		dev_err(dev, "could not create FM\n");
		goto error;
	}

	err = snd_card_register(card);
	if (err < 0) {
		dev_err(dev, "could not register card\n");
		goto error;
	}

	dev_set_drvdata(dev, card);
	return 0;

error:
	snd_card_free(card);
	return err;
}
/* ISA remove callback: free the card created in snd_adlib_probe() */
static int snd_adlib_remove(struct device *dev, unsigned int n)
{
	snd_card_free(dev_get_drvdata(dev));
	return 0;
}
/* ISA bus driver glue for the AdLib card */
static struct isa_driver snd_adlib_driver = {
	.match		= snd_adlib_match,
	.probe		= snd_adlib_probe,
	.remove		= snd_adlib_remove,

	.driver		= {
		.name	= DEV_NAME
	}
};
/* module entry: register the ISA driver for up to SNDRV_CARDS cards */
static int __init alsa_card_adlib_init(void)
{
	return isa_register_driver(&snd_adlib_driver, SNDRV_CARDS);
}
/* module exit: unregister the ISA driver (removes all probed cards) */
static void __exit alsa_card_adlib_exit(void)
{
	isa_unregister_driver(&snd_adlib_driver);
}

module_init(alsa_card_adlib_init);
module_exit(alsa_card_adlib_exit);
| gpl-2.0 |
kcarden/android_kernel_lge_g4stylus | drivers/platform/x86/hp-wmi.c | 1886 | 24167 | /*
* HP WMI hotkeys
*
* Copyright (C) 2008 Red Hat <mjg@redhat.com>
* Copyright (C) 2010, 2011 Anssi Hannula <anssi.hannula@iki.fi>
*
* Portions based on wistron_btns.c:
* Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
* Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
* Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/rfkill.h>
#include <linux/string.h>
MODULE_AUTHOR("Matthew Garrett <mjg59@srcf.ucam.org>");
MODULE_DESCRIPTION("HP laptop WMI hotkeys driver");
MODULE_LICENSE("GPL");

MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");

/* GUIDs: event source and BIOS query interface */
#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"

/* commandtype values accepted by hp_wmi_perform_query() */
#define HPWMI_DISPLAY_QUERY 0x1
#define HPWMI_HDDTEMP_QUERY 0x2
#define HPWMI_ALS_QUERY 0x3
#define HPWMI_HARDWARE_QUERY 0x4
#define HPWMI_WIRELESS_QUERY 0x5
#define HPWMI_HOTKEY_QUERY 0xc
#define HPWMI_WIRELESS2_QUERY 0x1b

/* radio indices shared by the 0x5 and 0x1b wireless interfaces */
enum hp_wmi_radio {
	HPWMI_WIFI = 0,
	HPWMI_BLUETOOTH = 1,
	HPWMI_WWAN = 2,
	HPWMI_GPS = 3,
};

/* event ids delivered through the HPWMI_EVENT_GUID notify handler */
enum hp_wmi_event_ids {
	HPWMI_DOCK_EVENT = 1,
	HPWMI_PARK_HDD = 2,
	HPWMI_SMART_ADAPTER = 3,
	HPWMI_BEZEL_BUTTON = 4,
	HPWMI_WIRELESS = 5,
	HPWMI_CPU_BATTERY_THROTTLE = 6,
	HPWMI_LOCK_SWITCH = 7,
	HPWMI_LID_SWITCH = 8,
	HPWMI_SCREEN_ROTATION = 9,
	HPWMI_COOLSENSE_SYSTEM_MOBILE = 0x0A,
	HPWMI_COOLSENSE_SYSTEM_HOT = 0x0B,
	HPWMI_PROXIMITY_SENSOR = 0x0C,
	HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D,
	HPWMI_PEAKSHIFT_PERIOD = 0x0F,
	HPWMI_BATTERY_CHARGE_PERIOD = 0x10,
};

/* input block passed to the WMI method (data carries up to 4 bytes) */
struct bios_args {
	u32 signature;
	u32 command;
	u32 commandtype;
	u32 datasize;
	u32 data;
};

/* header prepended to every buffer the WMI method returns */
struct bios_return {
	u32 sigpass;
	u32 return_code;
};

/* positive error codes reported in bios_return.return_code */
enum hp_return_value {
	HPWMI_RET_WRONG_SIGNATURE	= 0x02,
	HPWMI_RET_UNKNOWN_COMMAND	= 0x03,
	HPWMI_RET_UNKNOWN_CMDTYPE	= 0x04,
	HPWMI_RET_INVALID_PARAMETERS	= 0x05,
};

/* per-radio power bits reported by the 0x1b (rfkill2) query */
enum hp_wireless2_bits {
	HPWMI_POWER_STATE	= 0x01,
	HPWMI_POWER_SOFT	= 0x02,
	HPWMI_POWER_BIOS	= 0x04,
	HPWMI_POWER_HARD	= 0x08,
};

/* hard-blocked unless both BIOS and hardware power bits are set */
#define IS_HWBLOCKED(x) ((x & (HPWMI_POWER_BIOS | HPWMI_POWER_HARD)) \
			 != (HPWMI_POWER_BIOS | HPWMI_POWER_HARD))
/* soft-blocked when the soft power bit is clear */
#define IS_SWBLOCKED(x) !(x & HPWMI_POWER_SOFT)

/* one radio's state as reported by the 0x1b query */
struct bios_rfkill2_device_state {
	u8 radio_type;
	u8 bus_type;
	u16 vendor_id;
	u16 product_id;
	u16 subsys_vendor_id;
	u16 subsys_product_id;
	u8 rfkill_id;
	u8 power;
	u8 unknown[4];
};

/* 7 devices fit into the 128 byte buffer */
#define HPWMI_MAX_RFKILL2_DEVICES 7

/* full reply layout of the 0x1b query */
struct bios_rfkill2_state {
	u8 unknown[7];
	u8 count;
	u8 pad[8];
	struct bios_rfkill2_device_state device[HPWMI_MAX_RFKILL2_DEVICES];
};
/* sparse keymap translating BIOS hotkey codes to input key events */
static const struct key_entry hp_wmi_keymap[] = {
	{ KE_KEY, 0x02,   { KEY_BRIGHTNESSUP } },
	{ KE_KEY, 0x03,   { KEY_BRIGHTNESSDOWN } },
	{ KE_KEY, 0x20e6, { KEY_PROG1 } },
	{ KE_KEY, 0x20e8, { KEY_MEDIA } },
	{ KE_KEY, 0x2142, { KEY_MEDIA } },
	{ KE_KEY, 0x213b, { KEY_INFO } },
	{ KE_KEY, 0x2169, { KEY_DIRECTION } },
	{ KE_KEY, 0x231b, { KEY_HELP } },
	{ KE_END, 0 }
};

static struct input_dev *hp_wmi_input_dev;
static struct platform_device *hp_wmi_platform_dev;

/* legacy (0x5 interface) per-radio rfkill devices; NULL when absent */
static struct rfkill *wifi_rfkill;
static struct rfkill *bluetooth_rfkill;
static struct rfkill *wwan_rfkill;
static struct rfkill *gps_rfkill;

/* rfkill2 (0x1b interface) bookkeeping: BIOS id, slot index, device */
struct rfkill2_device {
	u8 id;
	int num;
	struct rfkill *rfkill;
};

/* nonzero rfkill2_count means the 0x1b interface is in use */
static int rfkill2_count;
static struct rfkill2_device rfkill2[HPWMI_MAX_RFKILL2_DEVICES];
/*
* hp_wmi_perform_query
*
* query: The commandtype -> What should be queried
* write: The command -> 0 read, 1 write, 3 ODM specific
* buffer: Buffer used as input and/or output
* insize: Size of input buffer
* outsize: Size of output buffer
*
* returns zero on success
* an HP WMI query specific error code (which is positive)
* -EINVAL if the query was not successful at all
* -EINVAL if the output buffer size exceeds buffersize
*
* Note: The buffersize must at least be the maximum of the input and output
* size. E.g. Battery info query (0x7) is defined to have 1 byte input
* and 128 byte output. The caller would do:
* buffer = kzalloc(128, GFP_KERNEL);
* ret = hp_wmi_perform_query(0x7, 0, buffer, 1, 128)
*/
static int hp_wmi_perform_query(int query, int write, void *buffer,
				int insize, int outsize)
{
	struct bios_return *bios_return;
	int actual_outsize;
	union acpi_object *obj;
	struct bios_args args = {
		.signature = 0x55434553,
		.command = write ? 0x2 : 0x1,
		.commandtype = query,
		.datasize = insize,
		.data = 0,
	};
	struct acpi_buffer input = { sizeof(struct bios_args), &args };
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	u32 rc;

	/* args.data is a single u32, so at most 4 bytes of input fit */
	if (WARN_ON(insize > sizeof(args.data)))
		return -EINVAL;
	memcpy(&args.data, buffer, insize);

	wmi_evaluate_method(HPWMI_BIOS_GUID, 0, 0x3, &input, &output);

	obj = output.pointer;

	if (!obj)
		return -EINVAL;
	else if (obj->type != ACPI_TYPE_BUFFER) {
		kfree(obj);
		return -EINVAL;
	}

	/* reply buffer starts with a bios_return header */
	bios_return = (struct bios_return *)obj->buffer.pointer;
	rc = bios_return->return_code;

	if (rc) {
		/* note: HP error codes are positive and returned as-is */
		if (rc != HPWMI_RET_UNKNOWN_CMDTYPE)
			pr_warn("query 0x%x returned error 0x%x\n", query, rc);
		kfree(obj);
		return rc;
	}

	if (!outsize) {
		/* ignore output data */
		kfree(obj);
		return 0;
	}

	/* copy the payload out, zero-padding up to the requested outsize */
	actual_outsize = min(outsize, (int)(obj->buffer.length - sizeof(*bios_return)));
	memcpy(buffer, obj->buffer.pointer + sizeof(*bios_return), actual_outsize);
	memset(buffer + actual_outsize, 0, outsize - actual_outsize);
	kfree(obj);
	return 0;
}
/* Read the display state word; -EINVAL on any query failure. */
static int hp_wmi_display_state(void)
{
	int state = 0;

	if (hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state,
				 sizeof(state), sizeof(state)))
		return -EINVAL;
	return state;
}
/* Read the HDD temperature state word; -EINVAL on any query failure. */
static int hp_wmi_hddtemp_state(void)
{
	int state = 0;

	if (hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state,
				 sizeof(state), sizeof(state)))
		return -EINVAL;
	return state;
}
/* Read the ambient light sensor state; -EINVAL on any query failure. */
static int hp_wmi_als_state(void)
{
	int state = 0;

	if (hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state,
				 sizeof(state), sizeof(state)))
		return -EINVAL;
	return state;
}
/* Return 1 when docked, 0 when not; -EINVAL on any query failure. */
static int hp_wmi_dock_state(void)
{
	int state = 0;

	if (hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
				 sizeof(state), sizeof(state)))
		return -EINVAL;
	return state & 0x1;	/* bit 0 == dock state */
}
static int hp_wmi_tablet_state(void)
{
int state = 0;
int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
return ret;
return (state & 0x4) ? 1 : 0;
}
/*
 * rfkill set_block callback for the legacy 0x5 interface.  Writes the
 * radio's enable bit together with its per-radio mask bit.
 */
static int hp_wmi_set_block(void *data, bool blocked)
{
	enum hp_wmi_radio r = (enum hp_wmi_radio) data;
	int query = BIT(r + 8) | ((!blocked) << r);

	return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1,
				    &query, sizeof(query), 0) ? -EINVAL : 0;
}
/* rfkill ops for the legacy (0x5) wireless interface */
static const struct rfkill_ops hp_wmi_rfkill_ops = {
	.set_block = hp_wmi_set_block,
};
/*
 * Legacy interface: true when radio R is soft-blocked (its soft-enable
 * bit in the wireless state word is clear).
 */
static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
{
	int wireless = 0;
	int mask = 0x200 << (r * 8);

	hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
			     &wireless, sizeof(wireless),
			     sizeof(wireless));
	/* TBD: Pass error */

	return !(wireless & mask);
}
/*
 * Legacy interface: true when radio R is hard-blocked (its hardware
 * bit in the wireless state word is clear).
 */
static bool hp_wmi_get_hw_state(enum hp_wmi_radio r)
{
	int wireless = 0;
	int mask = 0x800 << (r * 8);

	hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
			     &wireless, sizeof(wireless),
			     sizeof(wireless));
	/* TBD: Pass error */

	return !(wireless & mask);
}
/* rfkill set_block callback for the rfkill2 (0x1b) interface. */
static int hp_wmi_rfkill2_set_block(void *data, bool blocked)
{
	int rfkill_id = (int)(long)data;
	char buffer[4] = { 0x01, 0x00, rfkill_id, !blocked };
	int err;

	err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, 1,
				   buffer, sizeof(buffer), 0);
	return err ? -EINVAL : 0;
}
/* rfkill ops for the rfkill2 (0x1b) wireless interface */
static const struct rfkill_ops hp_wmi_rfkill2_ops = {
	.set_block = hp_wmi_rfkill2_set_block,
};
/* Re-read the 0x1b state and push sw/hw block bits into each rfkill. */
static int hp_wmi_rfkill2_refresh(void)
{
	int err, i;
	struct bios_rfkill2_state state;

	err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, 0, &state,
				   0, sizeof(state));
	if (err)
		return err;

	for (i = 0; i < rfkill2_count; i++) {
		int num = rfkill2[i].num;
		struct bios_rfkill2_device_state *devstate;
		devstate = &state.device[num];

		/* skip slots whose id no longer matches what we set up */
		if (num >= state.count ||
		    devstate->rfkill_id != rfkill2[i].id) {
			pr_warn("power configuration of the wireless devices unexpectedly changed\n");
			continue;
		}

		rfkill_set_states(rfkill2[i].rfkill,
				  IS_SWBLOCKED(devstate->power),
				  IS_HWBLOCKED(devstate->power));
	}
	return 0;
}
/* sysfs: print the display state, or fail with -EINVAL. */
static ssize_t show_display(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	int value = hp_wmi_display_state();

	return value < 0 ? -EINVAL : sprintf(buf, "%d\n", value);
}
/* sysfs: print the HDD temperature state, or fail with -EINVAL. */
static ssize_t show_hddtemp(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	int value = hp_wmi_hddtemp_state();

	return value < 0 ? -EINVAL : sprintf(buf, "%d\n", value);
}
/* sysfs: print the ambient light sensor state, or fail with -EINVAL. */
static ssize_t show_als(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	int value = hp_wmi_als_state();

	return value < 0 ? -EINVAL : sprintf(buf, "%d\n", value);
}
/* sysfs: print the dock state (0/1), or fail with -EINVAL. */
static ssize_t show_dock(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	int value = hp_wmi_dock_state();

	return value < 0 ? -EINVAL : sprintf(buf, "%d\n", value);
}
/* sysfs: print the tablet-mode state (0/1), or fail with -EINVAL. */
static ssize_t show_tablet(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	int value = hp_wmi_tablet_state();

	return value < 0 ? -EINVAL : sprintf(buf, "%d\n", value);
}
/*
 * sysfs: write the ambient light sensor state.
 *
 * Fix: simple_strtoul() is deprecated and silently accepted malformed
 * input (parsing garbage as 0 and writing it to the BIOS).  Use
 * kstrtou32(), which rejects invalid or overflowing values and copes
 * with the trailing newline sysfs delivers.
 */
static ssize_t set_als(struct device *dev, struct device_attribute *attr,
		       const char *buf, size_t count)
{
	u32 tmp;
	int ret;

	ret = kstrtou32(buf, 10, &tmp);
	if (ret)
		return ret;

	ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp,
				   sizeof(tmp), sizeof(tmp));
	if (ret)
		return -EINVAL;

	return count;
}
/* sysfs attributes exposed on the hp-wmi platform device */
static DEVICE_ATTR(display, S_IRUGO, show_display, NULL);
static DEVICE_ATTR(hddtemp, S_IRUGO, show_hddtemp, NULL);
static DEVICE_ATTR(als, S_IRUGO | S_IWUSR, show_als, set_als);
static DEVICE_ATTR(dock, S_IRUGO, show_dock, NULL);
static DEVICE_ATTR(tablet, S_IRUGO, show_tablet, NULL);
/* WMI notify handler: decode the event buffer and dispatch by event id. */
static void hp_wmi_notify(u32 value, void *context)
{
	struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	u32 event_id, event_data;
	int key_code = 0, ret;
	u32 *location;
	acpi_status status;

	status = wmi_get_event_data(value, &response);
	if (status != AE_OK) {
		pr_info("bad event status 0x%x\n", status);
		return;
	}

	obj = (union acpi_object *)response.pointer;

	if (!obj)
		return;
	if (obj->type != ACPI_TYPE_BUFFER) {
		pr_info("Unknown response received %d\n", obj->type);
		kfree(obj);
		return;
	}

	/*
	 * Depending on ACPI version the concatenation of id and event data
	 * inside _WED function will result in a 8 or 16 byte buffer.
	 */
	location = (u32 *)obj->buffer.pointer;
	if (obj->buffer.length == 8) {
		event_id = *location;
		event_data = *(location + 1);
	} else if (obj->buffer.length == 16) {
		event_id = *location;
		event_data = *(location + 2);
	} else {
		pr_info("Unknown buffer length %d\n", obj->buffer.length);
		kfree(obj);
		return;
	}
	kfree(obj);

	switch (event_id) {
	case HPWMI_DOCK_EVENT:
		/* dock change may also imply a tablet-mode change */
		input_report_switch(hp_wmi_input_dev, SW_DOCK,
				    hp_wmi_dock_state());
		input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
				    hp_wmi_tablet_state());
		input_sync(hp_wmi_input_dev);
		break;
	case HPWMI_PARK_HDD:
		break;
	case HPWMI_SMART_ADAPTER:
		break;
	case HPWMI_BEZEL_BUTTON:
		/* ask the BIOS which hotkey triggered the event */
		ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
					   &key_code,
					   sizeof(key_code),
					   sizeof(key_code));
		if (ret)
			break;

		if (!sparse_keymap_report_event(hp_wmi_input_dev,
						key_code, 1, true))
			pr_info("Unknown key code - 0x%x\n", key_code);
		break;
	case HPWMI_WIRELESS:
		if (rfkill2_count) {
			hp_wmi_rfkill2_refresh();
			break;
		}

		/* legacy interface: re-read each radio's sw/hw state */
		if (wifi_rfkill)
			rfkill_set_states(wifi_rfkill,
					  hp_wmi_get_sw_state(HPWMI_WIFI),
					  hp_wmi_get_hw_state(HPWMI_WIFI));
		if (bluetooth_rfkill)
			rfkill_set_states(bluetooth_rfkill,
					  hp_wmi_get_sw_state(HPWMI_BLUETOOTH),
					  hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
		if (wwan_rfkill)
			rfkill_set_states(wwan_rfkill,
					  hp_wmi_get_sw_state(HPWMI_WWAN),
					  hp_wmi_get_hw_state(HPWMI_WWAN));
		if (gps_rfkill)
			rfkill_set_states(gps_rfkill,
					  hp_wmi_get_sw_state(HPWMI_GPS),
					  hp_wmi_get_hw_state(HPWMI_GPS));
		break;
	case HPWMI_CPU_BATTERY_THROTTLE:
		pr_info("Unimplemented CPU throttle because of 3 Cell battery event detected\n");
		break;
	case HPWMI_LOCK_SWITCH:
		break;
	case HPWMI_LID_SWITCH:
		break;
	case HPWMI_SCREEN_ROTATION:
		break;
	case HPWMI_COOLSENSE_SYSTEM_MOBILE:
		break;
	case HPWMI_COOLSENSE_SYSTEM_HOT:
		break;
	case HPWMI_PROXIMITY_SENSOR:
		break;
	case HPWMI_BACKLIT_KB_BRIGHTNESS:
		break;
	case HPWMI_PEAKSHIFT_PERIOD:
		break;
	case HPWMI_BATTERY_CHARGE_PERIOD:
		break;
	default:
		pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
		break;
	}
}
/*
 * Allocate and register the hotkey/switch input device and install the
 * WMI event handler.  Returns 0 or a negative errno, unwinding on error.
 */
static int __init hp_wmi_input_setup(void)
{
	acpi_status status;
	int err;

	hp_wmi_input_dev = input_allocate_device();
	if (!hp_wmi_input_dev)
		return -ENOMEM;

	hp_wmi_input_dev->name = "HP WMI hotkeys";
	hp_wmi_input_dev->phys = "wmi/input0";
	hp_wmi_input_dev->id.bustype = BUS_HOST;

	/* dock and tablet-mode are exposed as switch events */
	__set_bit(EV_SW, hp_wmi_input_dev->evbit);
	__set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
	__set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);

	err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
	if (err)
		goto err_free_dev;

	/* Set initial hardware state */
	input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state());
	input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
			    hp_wmi_tablet_state());
	input_sync(hp_wmi_input_dev);

	status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL);
	if (ACPI_FAILURE(status)) {
		err = -EIO;
		goto err_free_keymap;
	}

	err = input_register_device(hp_wmi_input_dev);
	if (err)
		goto err_uninstall_notifier;

	return 0;

err_uninstall_notifier:
	wmi_remove_notify_handler(HPWMI_EVENT_GUID);
err_free_keymap:
	sparse_keymap_free(hp_wmi_input_dev);
err_free_dev:
	input_free_device(hp_wmi_input_dev);
	return err;
}
/* Tear down everything hp_wmi_input_setup() created. */
static void hp_wmi_input_destroy(void)
{
	wmi_remove_notify_handler(HPWMI_EVENT_GUID);
	sparse_keymap_free(hp_wmi_input_dev);
	input_unregister_device(hp_wmi_input_dev);
}
/* Remove all sysfs attributes created by hp_wmi_bios_setup(); safe to
 * call even when some of them were never created. */
static void cleanup_sysfs(struct platform_device *device)
{
	device_remove_file(&device->dev, &dev_attr_display);
	device_remove_file(&device->dev, &dev_attr_hddtemp);
	device_remove_file(&device->dev, &dev_attr_als);
	device_remove_file(&device->dev, &dev_attr_dock);
	device_remove_file(&device->dev, &dev_attr_tablet);
}
/*
 * Set up rfkill devices via the legacy 0x5 wireless interface.
 * Registration order is wifi, bluetooth, wwan, gps.
 *
 * Fixes to the error unwinding:
 *  - bluetooth allocation failure used to jump to register_wifi_error,
 *    destroying a still-REGISTERED wifi rfkill without unregistering it;
 *  - gps allocation/registration failures used to skip wwan entirely,
 *    leaking a registered wwan rfkill.
 * The labels now unwind in strict reverse registration order, one label
 * per device.  Both the alloc-failure and register-failure paths of a
 * device jump to that device's own label: rfkill_destroy(NULL) is a
 * documented no-op, and destroying an allocated-but-unregistered rfkill
 * is the correct cleanup.
 */
static int hp_wmi_rfkill_setup(struct platform_device *device)
{
	int err;
	int wireless = 0;

	err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, &wireless,
				   sizeof(wireless), sizeof(wireless));
	if (err)
		return err;

	if (wireless & 0x1) {
		wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
					   RFKILL_TYPE_WLAN,
					   &hp_wmi_rfkill_ops,
					   (void *) HPWMI_WIFI);
		if (!wifi_rfkill)
			return -ENOMEM;
		rfkill_init_sw_state(wifi_rfkill,
				     hp_wmi_get_sw_state(HPWMI_WIFI));
		rfkill_set_hw_state(wifi_rfkill,
				    hp_wmi_get_hw_state(HPWMI_WIFI));
		err = rfkill_register(wifi_rfkill);
		if (err)
			goto register_wifi_error;
	}

	if (wireless & 0x2) {
		bluetooth_rfkill = rfkill_alloc("hp-bluetooth", &device->dev,
						RFKILL_TYPE_BLUETOOTH,
						&hp_wmi_rfkill_ops,
						(void *) HPWMI_BLUETOOTH);
		if (!bluetooth_rfkill) {
			err = -ENOMEM;
			goto register_bluetooth_error;
		}
		rfkill_init_sw_state(bluetooth_rfkill,
				     hp_wmi_get_sw_state(HPWMI_BLUETOOTH));
		rfkill_set_hw_state(bluetooth_rfkill,
				    hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
		err = rfkill_register(bluetooth_rfkill);
		if (err)
			goto register_bluetooth_error;
	}

	if (wireless & 0x4) {
		wwan_rfkill = rfkill_alloc("hp-wwan", &device->dev,
					   RFKILL_TYPE_WWAN,
					   &hp_wmi_rfkill_ops,
					   (void *) HPWMI_WWAN);
		if (!wwan_rfkill) {
			err = -ENOMEM;
			goto register_wwan_error;
		}
		rfkill_init_sw_state(wwan_rfkill,
				     hp_wmi_get_sw_state(HPWMI_WWAN));
		rfkill_set_hw_state(wwan_rfkill,
				    hp_wmi_get_hw_state(HPWMI_WWAN));
		err = rfkill_register(wwan_rfkill);
		if (err)
			goto register_wwan_error;
	}

	if (wireless & 0x8) {
		gps_rfkill = rfkill_alloc("hp-gps", &device->dev,
					  RFKILL_TYPE_GPS,
					  &hp_wmi_rfkill_ops,
					  (void *) HPWMI_GPS);
		if (!gps_rfkill) {
			err = -ENOMEM;
			goto register_gps_error;
		}
		rfkill_init_sw_state(gps_rfkill,
				     hp_wmi_get_sw_state(HPWMI_GPS));
		rfkill_set_hw_state(gps_rfkill,
				    hp_wmi_get_hw_state(HPWMI_GPS));
		err = rfkill_register(gps_rfkill);
		if (err)
			goto register_gps_error;
	}

	return 0;

register_gps_error:
	rfkill_destroy(gps_rfkill);
	gps_rfkill = NULL;
	if (wwan_rfkill)
		rfkill_unregister(wwan_rfkill);
register_wwan_error:
	rfkill_destroy(wwan_rfkill);
	wwan_rfkill = NULL;
	if (bluetooth_rfkill)
		rfkill_unregister(bluetooth_rfkill);
register_bluetooth_error:
	rfkill_destroy(bluetooth_rfkill);
	bluetooth_rfkill = NULL;
	if (wifi_rfkill)
		rfkill_unregister(wifi_rfkill);
register_wifi_error:
	rfkill_destroy(wifi_rfkill);
	wifi_rfkill = NULL;
	return err;
}
/*
 * Set up rfkill devices via the rfkill2 (0x1b) interface.  Populates the
 * rfkill2[] table and rfkill2_count; unwinds fully registered entries on
 * failure.  Returns 0 or a negative errno.
 */
static int hp_wmi_rfkill2_setup(struct platform_device *device)
{
	int err, i;
	struct bios_rfkill2_state state;
	err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, 0, &state,
				   0, sizeof(state));
	if (err)
		return err;

	if (state.count > HPWMI_MAX_RFKILL2_DEVICES) {
		pr_warn("unable to parse 0x1b query output\n");
		return -EINVAL;
	}

	for (i = 0; i < state.count; i++) {
		struct rfkill *rfkill;
		enum rfkill_type type;
		char *name;
		switch (state.device[i].radio_type) {
		case HPWMI_WIFI:
			type = RFKILL_TYPE_WLAN;
			name = "hp-wifi";
			break;
		case HPWMI_BLUETOOTH:
			type = RFKILL_TYPE_BLUETOOTH;
			name = "hp-bluetooth";
			break;
		case HPWMI_WWAN:
			type = RFKILL_TYPE_WWAN;
			name = "hp-wwan";
			break;
		case HPWMI_GPS:
			type = RFKILL_TYPE_GPS;
			name = "hp-gps";
			break;
		default:
			pr_warn("unknown device type 0x%x\n",
				state.device[i].radio_type);
			continue;
		}

		/* a zero vendor id marks an empty/bogus slot */
		if (!state.device[i].vendor_id) {
			pr_warn("zero device %d while %d reported\n",
				i, state.count);
			continue;
		}

		/* the slot index i doubles as the set_block callback data */
		rfkill = rfkill_alloc(name, &device->dev, type,
				      &hp_wmi_rfkill2_ops, (void *)(long)i);
		if (!rfkill) {
			err = -ENOMEM;
			goto fail;
		}

		rfkill2[rfkill2_count].id = state.device[i].rfkill_id;
		rfkill2[rfkill2_count].num = i;
		rfkill2[rfkill2_count].rfkill = rfkill;

		rfkill_init_sw_state(rfkill,
				     IS_SWBLOCKED(state.device[i].power));
		rfkill_set_hw_state(rfkill,
				    IS_HWBLOCKED(state.device[i].power));

		if (!(state.device[i].power & HPWMI_POWER_BIOS))
			pr_info("device %s blocked by BIOS\n", name);

		err = rfkill_register(rfkill);
		if (err) {
			rfkill_destroy(rfkill);
			goto fail;
		}

		rfkill2_count++;
	}

	return 0;
fail:
	/* unwind every entry that was successfully registered so far */
	for (; rfkill2_count > 0; rfkill2_count--) {
		rfkill_unregister(rfkill2[rfkill2_count - 1].rfkill);
		rfkill_destroy(rfkill2[rfkill2_count - 1].rfkill);
	}
	return err;
}
/*
 * Platform driver probe: set up rfkill (legacy first, rfkill2 as a
 * fallback) and create the sysfs attributes.
 */
static int __init hp_wmi_bios_setup(struct platform_device *device)
{
	int err;

	/* clear detected rfkill devices */
	wifi_rfkill = NULL;
	bluetooth_rfkill = NULL;
	wwan_rfkill = NULL;
	gps_rfkill = NULL;
	rfkill2_count = 0;

	/* rfkill failures are deliberately non-fatal: the driver is still
	 * useful for hotkeys/sysfs -- NOTE(review): rfkill2 errors ignored */
	if (hp_wmi_rfkill_setup(device))
		hp_wmi_rfkill2_setup(device);

	err = device_create_file(&device->dev, &dev_attr_display);
	if (err)
		goto add_sysfs_error;
	err = device_create_file(&device->dev, &dev_attr_hddtemp);
	if (err)
		goto add_sysfs_error;
	err = device_create_file(&device->dev, &dev_attr_als);
	if (err)
		goto add_sysfs_error;
	err = device_create_file(&device->dev, &dev_attr_dock);
	if (err)
		goto add_sysfs_error;
	err = device_create_file(&device->dev, &dev_attr_tablet);
	if (err)
		goto add_sysfs_error;
	return 0;

add_sysfs_error:
	cleanup_sysfs(device);
	return err;
}
/* Platform driver remove: drop sysfs files and all rfkill devices. */
static int __exit hp_wmi_bios_remove(struct platform_device *device)
{
	int i;
	cleanup_sysfs(device);

	/* rfkill2 entries first, then the legacy per-radio devices */
	for (i = 0; i < rfkill2_count; i++) {
		rfkill_unregister(rfkill2[i].rfkill);
		rfkill_destroy(rfkill2[i].rfkill);
	}

	if (wifi_rfkill) {
		rfkill_unregister(wifi_rfkill);
		rfkill_destroy(wifi_rfkill);
	}
	if (bluetooth_rfkill) {
		rfkill_unregister(bluetooth_rfkill);
		rfkill_destroy(bluetooth_rfkill);
	}
	if (wwan_rfkill) {
		rfkill_unregister(wwan_rfkill);
		rfkill_destroy(wwan_rfkill);
	}
	if (gps_rfkill) {
		rfkill_unregister(gps_rfkill);
		rfkill_destroy(gps_rfkill);
	}

	return 0;
}
static int hp_wmi_resume_handler(struct device *device)
{
	/*
	 * Hardware state may have changed while suspended, so trigger
	 * input events for the current state. As this is a switch,
	 * the input layer will only actually pass it on if the state
	 * changed.
	 */
	if (hp_wmi_input_dev) {
		input_report_switch(hp_wmi_input_dev, SW_DOCK,
				    hp_wmi_dock_state());
		input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
				    hp_wmi_tablet_state());
		input_sync(hp_wmi_input_dev);
	}

	/* also resynchronize every rfkill with the BIOS's current view */
	if (rfkill2_count)
		hp_wmi_rfkill2_refresh();

	if (wifi_rfkill)
		rfkill_set_states(wifi_rfkill,
				  hp_wmi_get_sw_state(HPWMI_WIFI),
				  hp_wmi_get_hw_state(HPWMI_WIFI));
	if (bluetooth_rfkill)
		rfkill_set_states(bluetooth_rfkill,
				  hp_wmi_get_sw_state(HPWMI_BLUETOOTH),
				  hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
	if (wwan_rfkill)
		rfkill_set_states(wwan_rfkill,
				  hp_wmi_get_sw_state(HPWMI_WWAN),
				  hp_wmi_get_hw_state(HPWMI_WWAN));
	if (gps_rfkill)
		rfkill_set_states(gps_rfkill,
				  hp_wmi_get_sw_state(HPWMI_GPS),
				  hp_wmi_get_hw_state(HPWMI_GPS));

	return 0;
}
/* resync hardware/rfkill state after both resume and hibernate restore */
static const struct dev_pm_ops hp_wmi_pm_ops = {
	.resume  = hp_wmi_resume_handler,
	.restore  = hp_wmi_resume_handler,
};

static struct platform_driver hp_wmi_driver = {
	.driver = {
		.name = "hp-wmi",
		.owner = THIS_MODULE,
		.pm = &hp_wmi_pm_ops,
	},
	.remove = __exit_p(hp_wmi_bios_remove),
};
/*
 * Module init: the event GUID drives the input device, the BIOS GUID
 * drives the platform device/driver; either alone is sufficient.
 */
static int __init hp_wmi_init(void)
{
	int err;
	int event_capable = wmi_has_guid(HPWMI_EVENT_GUID);
	int bios_capable = wmi_has_guid(HPWMI_BIOS_GUID);

	if (!bios_capable && !event_capable)
		return -ENODEV;

	if (event_capable) {
		err = hp_wmi_input_setup();
		if (err)
			return err;
	}

	if (bios_capable) {
		hp_wmi_platform_dev =
			platform_device_register_simple("hp-wmi", -1, NULL, 0);
		if (IS_ERR(hp_wmi_platform_dev)) {
			err = PTR_ERR(hp_wmi_platform_dev);
			goto err_destroy_input;
		}

		/* probe-once binding of the driver to the device above */
		err = platform_driver_probe(&hp_wmi_driver, hp_wmi_bios_setup);
		if (err)
			goto err_unregister_device;
	}

	return 0;

err_unregister_device:
	platform_device_unregister(hp_wmi_platform_dev);
err_destroy_input:
	if (event_capable)
		hp_wmi_input_destroy();

	return err;
}
module_init(hp_wmi_init);
/*
 * Module exit: tear down what hp_wmi_init() created.  The input device
 * is destroyed under the same event-GUID condition used to create it;
 * the platform device and driver are unregistered only when the BIOS
 * path was set up (hp_wmi_platform_dev non-NULL).
 */
static void __exit hp_wmi_exit(void)
{
	if (wmi_has_guid(HPWMI_EVENT_GUID))
		hp_wmi_input_destroy();

	if (hp_wmi_platform_dev) {
		platform_device_unregister(hp_wmi_platform_dev);
		platform_driver_unregister(&hp_wmi_driver);
	}
}
module_exit(hp_wmi_exit);
| gpl-2.0 |
omnirom/android_kernel_asus_fugu | drivers/staging/vt6656/main_usb.c | 1886 | 44587 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* File: main_usb.c
*
* Purpose: driver entry for initial, open, close, tx and rx.
*
* Author: Lyndon Chen
*
* Date: Dec 8, 2005
*
* Functions:
*
* vt6656_probe - module initial (insmod) driver entry
* device_remove1 - module remove entry
* device_open - allocate dma/descripter resource & initial mac/bbp function
* device_xmit - asynchronous data tx function
* device_set_multi - set mac filter
* device_ioctl - ioctl entry
* device_close - shutdown mac/bbp & free dma/descriptor resource
* device_alloc_frag_buf - rx fragement pre-allocated function
* device_free_tx_bufs - free tx buffer function
* device_dma0_tx_80211- tx 802.11 frame via dma0
* device_dma0_xmit- tx PS buffered frame via dma0
* device_init_registers- initial MAC & BBP & RF internal registers.
* device_init_rings- initial tx/rx ring buffer
* device_init_defrag_cb- initial & allocate de-fragement buffer.
* device_tx_srv- tx interrupt service function
*
* Revision History:
*/
#undef __NO_VERSION__
#include <linux/file.h>
#include "device.h"
#include "card.h"
#include "baseband.h"
#include "mac.h"
#include "tether.h"
#include "wmgr.h"
#include "wctl.h"
#include "power.h"
#include "wcmd.h"
#include "iocmd.h"
#include "tcrc.h"
#include "rxtx.h"
#include "bssdb.h"
#include "hostap.h"
#include "wpactl.h"
#include "iwctl.h"
#include "dpc.h"
#include "datarate.h"
#include "rf.h"
#include "firmware.h"
#include "rndis.h"
#include "control.h"
#include "channel.h"
#include "int.h"
#include "iowpa.h"
/* Driver-wide verbosity threshold consumed by DBG_PRT(); switch to the
 * commented-out MSG_LEVEL_DEBUG line for full tracing. */
/* static int msglevel = MSG_LEVEL_DEBUG; */
static int msglevel =MSG_LEVEL_INFO;
/*
 * define module options
 */

/* version information */
#define DRIVER_AUTHOR \
	"VIA Networking Technologies, Inc., <lyndonchen@vntek.com.tw>"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DEVICE_FULL_DRV_NAM);

/*
 * Declare module parameter N: an int array of MAX_UINTS entries with
 * default OPTION_DEFAULT, described by D in modinfo output.
 * NOTE(review): device_set_options() below initializes the device from
 * the *_DEF constants only and does not read these arrays - confirm
 * whether the parameters are consumed elsewhere.
 */
#define DEVICE_PARAM(N,D) \
static int N[MAX_UINTS]=OPTION_DEFAULT;\
module_param_array(N, int, NULL, 0);\
MODULE_PARM_DESC(N, D);

#define RX_DESC_DEF0 64
DEVICE_PARAM(RxDescriptors0,"Number of receive usb desc buffer");

#define TX_DESC_DEF0 64
DEVICE_PARAM(TxDescriptors0,"Number of transmit usb desc buffer");

#define CHANNEL_DEF 6
DEVICE_PARAM(Channel, "Channel number");

/* PreambleType[] is the preamble length used for transmit.
   0: indicate allows long preamble type
   1: indicate allows short preamble type
*/
#define PREAMBLE_TYPE_DEF 1
DEVICE_PARAM(PreambleType, "Preamble Type");

#define RTS_THRESH_DEF 2347
DEVICE_PARAM(RTSThreshold, "RTS threshold");

#define FRAG_THRESH_DEF 2346
DEVICE_PARAM(FragThreshold, "Fragmentation threshold");

#define DATA_RATE_DEF 13
/* datarate[] index
   0: indicate 1 Mbps 0x02
   1: indicate 2 Mbps 0x04
   2: indicate 5.5 Mbps 0x0B
   3: indicate 11 Mbps 0x16
   4: indicate 6 Mbps 0x0c
   5: indicate 9 Mbps 0x12
   6: indicate 12 Mbps 0x18
   7: indicate 18 Mbps 0x24
   8: indicate 24 Mbps 0x30
   9: indicate 36 Mbps 0x48
   10: indicate 48 Mbps 0x60
   11: indicate 54 Mbps 0x6c
   12: indicate 72 Mbps 0x90
   13: indicate auto rate
*/
DEVICE_PARAM(ConnectionRate, "Connection data rate");

#define OP_MODE_DEF 0
DEVICE_PARAM(OPMode, "Infrastruct, adhoc, AP mode ");

/* OpMode[] is used for transmit.
   0: indicate infrastruct mode used
   1: indicate adhoc mode used
   2: indicate AP mode used
*/

/* PSMode[]
   0: indicate disable power saving mode
   1: indicate enable power saving mode
*/
#define PS_MODE_DEF 0
DEVICE_PARAM(PSMode, "Power saving mode");

#define SHORT_RETRY_DEF 8
DEVICE_PARAM(ShortRetryLimit, "Short frame retry limits");

#define LONG_RETRY_DEF 4
DEVICE_PARAM(LongRetryLimit, "long frame retry limits");

/* BasebandType[] baseband type selected
   0: indicate 802.11a type
   1: indicate 802.11b type
   2: indicate 802.11g type
*/
#define BBP_TYPE_DEF 2
DEVICE_PARAM(BasebandType, "baseband type");

/* 80211hEnable[]
   0: indicate disable 802.11h
   1: indicate enable 802.11h
*/
#define X80211h_MODE_DEF 0
DEVICE_PARAM(b80211hEnable, "802.11h mode");
/*
* Static vars definitions
*/
/* USB ids handled by this driver: the single VIA VT6656 vendor/product
 * pair, followed by the mandatory terminating entry. */
static struct usb_device_id vt6656_table[] = {
	{USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)},
	{}
};
/* frequency list (map channels to frequencies) */
/*
static const long frequency_list[] = {
2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484,
4915, 4920, 4925, 4935, 4940, 4945, 4960, 4980,
5035, 5040, 5045, 5055, 5060, 5080, 5170, 5180, 5190, 5200, 5210, 5220, 5230, 5240,
5260, 5280, 5300, 5320, 5500, 5520, 5540, 5560, 5580, 5600, 5620, 5640, 5660, 5680,
5700, 5745, 5765, 5785, 5805, 5825
};
static const struct iw_handler_def iwctl_handler_def;
*/
static int vt6656_probe(struct usb_interface *intf,
const struct usb_device_id *id);
static void vt6656_disconnect(struct usb_interface *intf);
#ifdef CONFIG_PM /* Minimal support for suspend and resume */
static int vt6656_suspend(struct usb_interface *intf, pm_message_t message);
static int vt6656_resume(struct usb_interface *intf);
#endif /* CONFIG_PM */
static struct net_device_stats *device_get_stats(struct net_device *dev);
static int device_open(struct net_device *dev);
static int device_xmit(struct sk_buff *skb, struct net_device *dev);
static void device_set_multi(struct net_device *dev);
static int device_close(struct net_device *dev);
static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int device_init_registers(struct vnt_private *pDevice,
DEVICE_INIT_TYPE InitType);
static bool device_init_defrag_cb(struct vnt_private *pDevice);
static void device_init_diversity_timer(struct vnt_private *pDevice);
static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev);
static int ethtool_ioctl(struct net_device *dev, void *useraddr);
static void device_free_tx_bufs(struct vnt_private *pDevice);
static void device_free_rx_bufs(struct vnt_private *pDevice);
static void device_free_int_bufs(struct vnt_private *pDevice);
static void device_free_frag_bufs(struct vnt_private *pDevice);
static bool device_alloc_bufs(struct vnt_private *pDevice);
static int Read_config_file(struct vnt_private *pDevice);
static unsigned char *Config_FileOperation(struct vnt_private *pDevice);
static int Config_FileGetParameter(unsigned char *string,
unsigned char *dest,
unsigned char *source);
static void usb_device_reset(struct vnt_private *pDevice);
/*
 * Load compile-time defaults into the freshly zeroed private structure.
 * NOTE(review): the DEVICE_PARAM module parameters are not consulted
 * here - every field comes from the *_DEF constants above.
 */
static void
device_set_options(struct vnt_private *pDevice) {
	u8 abyBroadcastAddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	u8 abySNAP_RFC1042[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
	u8 abySNAP_Bridgetunnel[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8};

	/* well-known addresses used when building/parsing frames */
	memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, ETH_ALEN);
	memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, ETH_ALEN);
	memcpy(pDevice->abySNAP_Bridgetunnel, abySNAP_Bridgetunnel, ETH_ALEN);

	pDevice->cbTD = TX_DESC_DEF0;
	pDevice->cbRD = RX_DESC_DEF0;
	pDevice->uChannel = CHANNEL_DEF;
	pDevice->wRTSThreshold = RTS_THRESH_DEF;
	pDevice->wFragmentationThreshold = FRAG_THRESH_DEF;
	pDevice->byShortRetryLimit = SHORT_RETRY_DEF;
	pDevice->byLongRetryLimit = LONG_RETRY_DEF;
	pDevice->wMaxTransmitMSDULifetime = DEFAULT_MSDU_LIFETIME;
	pDevice->byShortPreamble = PREAMBLE_TYPE_DEF;
	pDevice->ePSMode = PS_MODE_DEF;
	pDevice->b11hEnable = X80211h_MODE_DEF;
	pDevice->eOPMode = OP_MODE_DEF;
	pDevice->uConnectionRate = DATA_RATE_DEF;
	/* any index below RATE_AUTO means a fixed transmit rate */
	if (pDevice->uConnectionRate < RATE_AUTO) pDevice->bFixRate = true;
	pDevice->byBBType = BBP_TYPE_DEF;
	pDevice->byPacketType = pDevice->byBBType;
	pDevice->byAutoFBCtrl = AUTO_FB_0;
	pDevice->bUpdateBBVGA = true;
	pDevice->byFOETuning = 0;
	pDevice->byAutoPwrTunning = 0;
	pDevice->wCTSDuration = 0;
	pDevice->byPreambleType = 0;
	pDevice->bExistSWNetAddr = false;
	/* pDevice->bDiversityRegCtlON = true; */
	pDevice->bDiversityRegCtlON = false;
}
/*
 * Prepare (but do not start) the three antenna-diversity timers.  All
 * three carry the device pointer as callback data and an initial expiry
 * of one second; they are started/stopped elsewhere when diversity
 * register control is enabled.
 */
static void device_init_diversity_timer(struct vnt_private *pDevice)
{
	init_timer(&pDevice->TimerSQ3Tmax1);
	pDevice->TimerSQ3Tmax1.data = (unsigned long)pDevice;
	pDevice->TimerSQ3Tmax1.function = (TimerFunction)TimerSQ3CallBack;
	pDevice->TimerSQ3Tmax1.expires = RUN_AT(HZ);

	init_timer(&pDevice->TimerSQ3Tmax2);
	pDevice->TimerSQ3Tmax2.data = (unsigned long)pDevice;
	pDevice->TimerSQ3Tmax2.function = (TimerFunction)TimerSQ3CallBack;
	pDevice->TimerSQ3Tmax2.expires = RUN_AT(HZ);

	/* the third timer has its own callback */
	init_timer(&pDevice->TimerSQ3Tmax3);
	pDevice->TimerSQ3Tmax3.data = (unsigned long)pDevice;
	pDevice->TimerSQ3Tmax3.function = (TimerFunction)TimerSQ3Tmax3CallBack;
	pDevice->TimerSQ3Tmax3.expires = RUN_AT(HZ);

	return;
}
/*
 * Initialize MAC, baseband and RF registers.
 *
 * For InitType == DEVICE_INIT_COLD this performs the full bring-up:
 * firmware check/download, baseband init, the CARDINIT control command,
 * and derivation of power tables, antenna config, calibration and the
 * permanent MAC address from the card-init response and EEPROM.  Warmer
 * init types only re-issue CARDINIT and redo the common rate/slot-time,
 * radio and LED setup at the end.
 *
 * Returns true on success, false on any firmware or control-request
 * failure.  NOTE(review): runs with pDevice->lock held around blocking
 * USB control requests - confirm that is intended before changing.
 */
static int device_init_registers(struct vnt_private *pDevice,
		DEVICE_INIT_TYPE InitType)
{
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	u8 abyBroadcastAddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	u8 abySNAP_RFC1042[ETH_ALEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
	u8 abySNAP_Bridgetunnel[ETH_ALEN]
		= {0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8};
	u8 byAntenna;
	int ii;
	CMD_CARD_INIT sInitCmd;
	int ntStatus = STATUS_SUCCESS;
	RSP_CARD_INIT sInitRsp;
	u8 byTmp;
	u8 byCalibTXIQ = 0, byCalibTXDC = 0, byCalibRXIQ = 0;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---->INIbInitAdapter. [%d][%d]\n", InitType, pDevice->byPacketType);
	spin_lock_irq(&pDevice->lock);

	if (InitType == DEVICE_INIT_COLD) {
		memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, ETH_ALEN);
		memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, ETH_ALEN);
		memcpy(pDevice->abySNAP_Bridgetunnel,
		       abySNAP_Bridgetunnel,
		       ETH_ALEN);

		/* download firmware only when the running version is stale */
		if ( !FIRMWAREbCheckVersion(pDevice) ) {
			if (FIRMWAREbDownload(pDevice) == true) {
				if (FIRMWAREbBrach2Sram(pDevice) == false) {
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" FIRMWAREbBrach2Sram fail \n");
					spin_unlock_irq(&pDevice->lock);
					return false;
				}
			} else {
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" FIRMWAREbDownload fail \n");
				spin_unlock_irq(&pDevice->lock);
				return false;
			}
		}

		if ( !BBbVT3184Init(pDevice) ) {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" BBbVT3184Init fail \n");
			spin_unlock_irq(&pDevice->lock);
			return false;
		}
	}

	sInitCmd.byInitClass = (u8)InitType;
	sInitCmd.bExistSWNetAddr = (u8) pDevice->bExistSWNetAddr;
	for (ii = 0; ii < 6; ii++)
		sInitCmd.bySWNetAddr[ii] = pDevice->abyCurrentNetAddr[ii];
	sInitCmd.byShortRetryLimit = pDevice->byShortRetryLimit;
	sInitCmd.byLongRetryLimit = pDevice->byLongRetryLimit;

	/* issue card_init command to device */
	ntStatus = CONTROLnsRequestOut(pDevice,
		MESSAGE_TYPE_CARDINIT,
		0,
		0,
		sizeof(CMD_CARD_INIT),
		(u8 *) &(sInitCmd));
	if ( ntStatus != STATUS_SUCCESS ) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Issue Card init fail \n");
		spin_unlock_irq(&pDevice->lock);
		return false;
	}

	if (InitType == DEVICE_INIT_COLD) {
		/* fetch the card-init response (RF type, MAC address, ...) */
		ntStatus = CONTROLnsRequestIn(pDevice,MESSAGE_TYPE_INIT_RSP,0,0,sizeof(RSP_CARD_INIT), (u8 *) &(sInitRsp));
		if (ntStatus != STATUS_SUCCESS) {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Cardinit request in status fail!\n");
			spin_unlock_irq(&pDevice->lock);
			return false;
		}

		/* local ID for AES functions */
		ntStatus = CONTROLnsRequestIn(pDevice,
			MESSAGE_TYPE_READ,
			MAC_REG_LOCALID,
			MESSAGE_REQUEST_MACREG,
			1,
			&pDevice->byLocalID);
		if ( ntStatus != STATUS_SUCCESS ) {
			spin_unlock_irq(&pDevice->lock);
			return false;
		}

		/* do MACbSoftwareReset in MACvInitialize */

		/* force CCK */
		pDevice->bCCK = true;
		pDevice->bProtectMode = false;
		/* only used in 11g type, sync with ERP IE */
		pDevice->bNonERPPresent = false;
		pDevice->bBarkerPreambleMd = false;
		if ( pDevice->bFixRate ) {
			pDevice->wCurrentRate = (u16) pDevice->uConnectionRate;
		} else {
			if ( pDevice->byBBType == BB_TYPE_11B )
				pDevice->wCurrentRate = RATE_11M;
			else
				pDevice->wCurrentRate = RATE_54M;
		}

		CHvInitChannelTable(pDevice);

		pDevice->byTopOFDMBasicRate = RATE_24M;
		pDevice->byTopCCKBasicRate = RATE_1M;
		pDevice->byRevId = 0;
		/* target to IF pin while programming to RF chip */
		pDevice->byCurPwr = 0xFF;

		pDevice->byCCKPwr = pDevice->abyEEPROM[EEP_OFS_PWR_CCK];
		pDevice->byOFDMPwrG = pDevice->abyEEPROM[EEP_OFS_PWR_OFDMG];
		/* load power table; zero entries fall back to the defaults */
		for (ii = 0; ii < 14; ii++) {
			pDevice->abyCCKPwrTbl[ii] = pDevice->abyEEPROM[ii + EEP_OFS_CCK_PWR_TBL];
			if (pDevice->abyCCKPwrTbl[ii] == 0)
				pDevice->abyCCKPwrTbl[ii] = pDevice->byCCKPwr;
			pDevice->abyOFDMPwrTbl[ii] = pDevice->abyEEPROM[ii + EEP_OFS_OFDM_PWR_TBL];
			if (pDevice->abyOFDMPwrTbl[ii] == 0)
				pDevice->abyOFDMPwrTbl[ii] = pDevice->byOFDMPwrG;
		}

		/*
		 * original zonetype is USA, but custom zonetype is Europe,
		 * then need to recover 12, 13, 14 channels with 11 channel
		 */
		if(((pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Japan) ||
			(pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Europe))&&
			(pDevice->byOriginalZonetype == ZoneType_USA)) {
			for (ii = 11; ii < 14; ii++) {
				pDevice->abyCCKPwrTbl[ii] = pDevice->abyCCKPwrTbl[10];
				pDevice->abyOFDMPwrTbl[ii] = pDevice->abyOFDMPwrTbl[10];
			}
		}

		pDevice->byOFDMPwrA = 0x34; /* same as RFbMA2829SelectChannel */
		/* load OFDM A power table */
		for (ii = 0; ii < CB_MAX_CHANNEL_5G; ii++) {
			pDevice->abyOFDMAPwrTbl[ii] = pDevice->abyEEPROM[ii + EEP_OFS_OFDMA_PWR_TBL];
			if (pDevice->abyOFDMAPwrTbl[ii] == 0)
				pDevice->abyOFDMAPwrTbl[ii] = pDevice->byOFDMPwrA;
		}

		/* derive antenna/diversity configuration from the EEPROM */
		byAntenna = pDevice->abyEEPROM[EEP_OFS_ANTENNA];
		if (byAntenna & EEP_ANTINV)
			pDevice->bTxRxAntInv = true;
		else
			pDevice->bTxRxAntInv = false;

		byAntenna &= (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);

		if (byAntenna == 0) /* if not set default is both */
			byAntenna = (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);

		if (byAntenna == (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN)) {
			pDevice->byAntennaCount = 2;
			pDevice->byTxAntennaMode = ANT_B;
			pDevice->dwTxAntennaSel = 1;
			pDevice->dwRxAntennaSel = 1;

			if (pDevice->bTxRxAntInv == true)
				pDevice->byRxAntennaMode = ANT_A;
			else
				pDevice->byRxAntennaMode = ANT_B;

			if (pDevice->bDiversityRegCtlON)
				pDevice->bDiversityEnable = true;
			else
				pDevice->bDiversityEnable = false;
		} else {
			/* single antenna: diversity impossible */
			pDevice->bDiversityEnable = false;
			pDevice->byAntennaCount = 1;
			pDevice->dwTxAntennaSel = 0;
			pDevice->dwRxAntennaSel = 0;

			if (byAntenna & EEP_ANTENNA_AUX) {
				pDevice->byTxAntennaMode = ANT_A;

				if (pDevice->bTxRxAntInv == true)
					pDevice->byRxAntennaMode = ANT_B;
				else
					pDevice->byRxAntennaMode = ANT_A;
			} else {
				pDevice->byTxAntennaMode = ANT_B;

				if (pDevice->bTxRxAntInv == true)
					pDevice->byRxAntennaMode = ANT_A;
				else
					pDevice->byRxAntennaMode = ANT_B;
			}
		}

		/* diversity algorithm tuning constants */
		pDevice->ulDiversityNValue = 100*255;
		pDevice->ulDiversityMValue = 100*16;
		pDevice->byTMax = 1;
		pDevice->byTMax2 = 4;
		pDevice->ulSQ3TH = 0;
		pDevice->byTMax3 = 64;

		/* get Auto Fall Back type */
		pDevice->byAutoFBCtrl = AUTO_FB_0;

		/* set SCAN Time */
		pDevice->uScanTime = WLAN_SCAN_MINITIME;

		/* default Auto Mode */
		/* pDevice->NetworkType = Ndis802_11Automode; */
		pDevice->eConfigPHYMode = PHY_TYPE_AUTO;
		pDevice->byBBType = BB_TYPE_11G;

		/* initialize BBP registers */
		pDevice->ulTxPower = 25;

		/* get channel range */
		pDevice->byMinChannel = 1;
		pDevice->byMaxChannel = CB_MAX_CHANNEL;

		/* get RFType */
		pDevice->byRFType = sInitRsp.byRFType;

		if ((pDevice->byRFType & RF_EMU) != 0) {
			/* force change RevID for VT3253 emu */
			pDevice->byRevId = 0x80;
		}

		/* load vt3266 calibration parameters in EEPROM */
		if (pDevice->byRFType == RF_VT3226D0) {
			if((pDevice->abyEEPROM[EEP_OFS_MAJOR_VER] == 0x1) &&
				(pDevice->abyEEPROM[EEP_OFS_MINOR_VER] >= 0x4)) {
				byCalibTXIQ = pDevice->abyEEPROM[EEP_OFS_CALIB_TX_IQ];
				byCalibTXDC = pDevice->abyEEPROM[EEP_OFS_CALIB_TX_DC];
				byCalibRXIQ = pDevice->abyEEPROM[EEP_OFS_CALIB_RX_IQ];
				if( (byCalibTXIQ || byCalibTXDC || byCalibRXIQ) ) {
					/* CR255, enable TX/RX IQ and DC compensation mode */
					ControlvWriteByte(pDevice,
						MESSAGE_REQUEST_BBREG,
						0xFF,
						0x03);
					/* CR251, TX I/Q Imbalance Calibration */
					ControlvWriteByte(pDevice,
						MESSAGE_REQUEST_BBREG,
						0xFB,
						byCalibTXIQ);
					/* CR252, TX DC-Offset Calibration */
					ControlvWriteByte(pDevice,
						MESSAGE_REQUEST_BBREG,
						0xFC,
						byCalibTXDC);
					/* CR253, RX I/Q Imbalance Calibration */
					ControlvWriteByte(pDevice,
						MESSAGE_REQUEST_BBREG,
						0xFD,
						byCalibRXIQ);
				} else {
					/* CR255, turn off BB Calibration compensation */
					ControlvWriteByte(pDevice,
						MESSAGE_REQUEST_BBREG,
						0xFF,
						0x0);
				}
			}
		}

		pMgmt->eScanType = WMAC_SCAN_PASSIVE;
		pMgmt->uCurrChannel = pDevice->uChannel;
		pMgmt->uIBSSChannel = pDevice->uChannel;
		CARDbSetMediaChannel(pDevice, pMgmt->uCurrChannel);

		/* get permanent network address */
		memcpy(pDevice->abyPermanentNetAddr,&(sInitRsp.byNetAddr[0]),6);
		memcpy(pDevice->abyCurrentNetAddr,
		       pDevice->abyPermanentNetAddr,
		       ETH_ALEN);

		/* if exist SW network address, use it */
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Network address = %pM\n",
			pDevice->abyCurrentNetAddr);
	}

	/*
	 * set BB and packet type at the same time
	 * set Short Slot Time, xIFS, and RSPINF
	 */
	if (pDevice->byBBType == BB_TYPE_11A) {
		CARDbAddBasicRate(pDevice, RATE_6M);
		pDevice->bShortSlotTime = true;
	} else {
		CARDbAddBasicRate(pDevice, RATE_1M);
		pDevice->bShortSlotTime = false;
	}

	BBvSetShortSlotTime(pDevice);
	CARDvSetBSSMode(pDevice);

	if (pDevice->bUpdateBBVGA) {
		pDevice->byBBVGACurrent = pDevice->abyBBVGA[0];
		pDevice->byBBVGANew = pDevice->byBBVGACurrent;
		BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]);
	}

	/* sample the hardware radio on/off switch via GPIO3 if enabled */
	pDevice->byRadioCtl = pDevice->abyEEPROM[EEP_OFS_RADIOCTL];
	pDevice->bHWRadioOff = false;

	if ( (pDevice->byRadioCtl & EEP_RADIOCTL_ENABLE) != 0 ) {
		ntStatus = CONTROLnsRequestIn(pDevice,
			MESSAGE_TYPE_READ,
			MAC_REG_GPIOCTL1,
			MESSAGE_REQUEST_MACREG,
			1,
			&byTmp);

		if ( ntStatus != STATUS_SUCCESS ) {
			spin_unlock_irq(&pDevice->lock);
			return false;
		}

		if ( (byTmp & GPIO3_DATA) == 0 ) {
			pDevice->bHWRadioOff = true;
			MACvRegBitsOn(pDevice,MAC_REG_GPIOCTL1,GPIO3_INTMD);
		} else {
			MACvRegBitsOff(pDevice,MAC_REG_GPIOCTL1,GPIO3_INTMD);
			pDevice->bHWRadioOff = false;
		}
	}

	/* LED timing/state and power-up GPIO */
	ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_TMLEN,0x38);
	ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
	MACvRegBitsOn(pDevice,MAC_REG_GPIOCTL0,0x01);

	if ((pDevice->bHWRadioOff == true) || (pDevice->bRadioControlOff == true)) {
		CARDbRadioPowerOff(pDevice);
	} else {
		CARDbRadioPowerOn(pDevice);
	}

	spin_unlock_irq(&pDevice->lock);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----INIbInitAdapter Exit\n");
	return true;
}
#ifdef CONFIG_PM /* Minimal support for suspend and resume */
/*
 * USB suspend hook: if the network interface is currently open, close it
 * so all URBs, timers and tasklets are torn down before power-down.
 */
static int vt6656_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct vnt_private *priv = usb_get_intfdata(intf);

	if (priv == NULL || priv->dev == NULL)
		return -ENODEV;

	if (priv->flags & DEVICE_FLAGS_OPENED)
		device_close(priv->dev);

	return 0;
}
static int vt6656_resume(struct usb_interface *intf)
{
struct vnt_private *device = usb_get_intfdata(intf);
if (!device || !device->dev)
return -ENODEV;
if (!(device->flags & DEVICE_FLAGS_OPENED))
device_open(device->dev);
return 0;
}
#endif /* CONFIG_PM */
/* net_device callbacks; all traffic and configuration flows through
 * these entry points. */
static const struct net_device_ops device_netdev_ops = {
	.ndo_open = device_open,
	.ndo_stop = device_close,
	.ndo_do_ioctl = device_ioctl,
	.ndo_get_stats = device_get_stats,
	.ndo_start_xmit = device_xmit,
	.ndo_set_rx_mode = device_set_multi,
};
/*
 * USB probe: allocate the net_device, install defaults and callbacks,
 * register with the network stack, then reset the USB device.
 *
 * NOTE(review): a placeholder MAC (00:00:00:00:00:01) is assigned here;
 * the real address is copied over in device_open() after the card-init
 * response has been read.
 */
static int
vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	u8 fake_mac[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
	struct usb_device *udev = interface_to_usbdev(intf);
	int rc = 0;
	struct net_device *netdev = NULL;
	struct vnt_private *pDevice;

	printk(KERN_NOTICE "%s Ver. %s\n", DEVICE_FULL_DRV_NAM, DEVICE_VERSION);
	printk(KERN_NOTICE "Copyright (c) 2004 VIA Networking Technologies, Inc.\n");

	/* take a reference on the USB device; dropped on error/disconnect */
	udev = usb_get_dev(udev);
	netdev = alloc_etherdev(sizeof(struct vnt_private));
	if (!netdev) {
		printk(KERN_ERR DEVICE_NAME ": allocate net device failed\n");
		rc = -ENOMEM;
		goto err_nomem;
	}

	pDevice = netdev_priv(netdev);
	memset(pDevice, 0, sizeof(struct vnt_private));

	pDevice->dev = netdev;
	pDevice->usb = udev;

	device_set_options(pDevice);
	spin_lock_init(&pDevice->lock);

	pDevice->tx_80211 = device_dma0_tx_80211;
	pDevice->vnt_mgmt.pAdapter = (void *) pDevice;

	netdev->netdev_ops = &device_netdev_ops;
	netdev->wireless_handlers =
		(struct iw_handler_def *) &iwctl_handler_def;

	usb_set_intfdata(intf, pDevice);
	SET_NETDEV_DEV(netdev, &intf->dev);
	memcpy(pDevice->dev->dev_addr, fake_mac, ETH_ALEN);
	rc = register_netdev(netdev);
	if (rc) {
		printk(KERN_ERR DEVICE_NAME " Failed to register netdev\n");
		goto err_netdev;
	}

	usb_device_reset(pDevice);

	return 0;

err_netdev:
	free_netdev(netdev);
err_nomem:
	usb_put_dev(udev);

	return rc;
}
/*
 * Release every TX send context and its URB.
 *
 * This is reached both from device_close() and from the
 * device_alloc_bufs() error path.  In the latter case apTD[] slots past
 * the failure point are still NULL (the private area is zeroed when the
 * net_device is allocated), so skip them instead of dereferencing a
 * NULL context - the unconditional pTxContext->pUrb access previously
 * oopsed on a partial allocation failure.
 */
static void device_free_tx_bufs(struct vnt_private *pDevice)
{
	PUSB_SEND_CONTEXT pTxContext;
	int ii;

	for (ii = 0; ii < pDevice->cbTD; ii++) {
		pTxContext = pDevice->apTD[ii];
		if (!pTxContext)
			continue;

		/* deallocate URBs */
		if (pTxContext->pUrb) {
			usb_kill_urb(pTxContext->pUrb);
			usb_free_urb(pTxContext->pUrb);
		}

		kfree(pTxContext);
	}
}
/*
 * Release every RX control block's URB and skb, then the RCB array.
 *
 * Also reached from the device_alloc_bufs() error path, where apRCB[]
 * entries past the failure point are still NULL (zeroed private area) -
 * skip those instead of dereferencing them.  kfree(NULL) is a no-op, so
 * a never-allocated pRCBMem is harmless.
 */
static void device_free_rx_bufs(struct vnt_private *pDevice)
{
	PRCB pRCB;
	int ii;

	for (ii = 0; ii < pDevice->cbRD; ii++) {
		pRCB = pDevice->apRCB[ii];
		if (!pRCB)
			continue;

		/* deallocate URBs */
		if (pRCB->pUrb) {
			usb_kill_urb(pRCB->pUrb);
			usb_free_urb(pRCB->pUrb);
		}

		/* deallocate skb */
		if (pRCB->skb)
			dev_kfree_skb(pRCB->skb);
	}

	kfree(pDevice->pRCBMem);
}
static void usb_device_reset(struct vnt_private *pDevice)
{
int status;
status = usb_reset_device(pDevice->usb);
if (status)
printk("usb_device_reset fail status=%d\n",status);
return ;
}
/* Release the interrupt endpoint's data buffer; kfree(NULL) is a no-op. */
static void device_free_int_bufs(struct vnt_private *pDevice)
{
	kfree(pDevice->intBuf.pDataBuf);
}
/*
 * Allocate everything the data path needs: one USB_SEND_CONTEXT (with
 * URB) per TX slot, the RCB array with one URB and skb per RX slot, the
 * control URB, the interrupt URB and the interrupt data buffer.
 *
 * Returns true on success.  On failure the goto labels fall through to
 * the free routines, which release whatever had been allocated so far.
 * NOTE(review): those free routines walk the full apTD[]/apRCB[] arrays,
 * so they must tolerate NULL entries left by a partial failure.
 */
static bool device_alloc_bufs(struct vnt_private *pDevice)
{
	PUSB_SEND_CONTEXT pTxContext;
	PRCB pRCB;
	int ii;

	for (ii = 0; ii < pDevice->cbTD; ii++) {
		pTxContext = kmalloc(sizeof(USB_SEND_CONTEXT), GFP_KERNEL);
		if (pTxContext == NULL) {
			DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : allocate tx usb context failed\n", pDevice->dev->name);
			goto free_tx;
		}
		pDevice->apTD[ii] = pTxContext;
		pTxContext->pDevice = (void *) pDevice;
		/* allocate URBs */
		pTxContext->pUrb = usb_alloc_urb(0, GFP_ATOMIC);
		if (pTxContext->pUrb == NULL) {
			DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "alloc tx urb failed\n");
			goto free_tx;
		}
		pTxContext->bBoolInUse = false;
	}

	/* allocate RCB mem */
	pDevice->pRCBMem = kzalloc((sizeof(RCB) * pDevice->cbRD), GFP_KERNEL);
	if (pDevice->pRCBMem == NULL) {
		DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : alloc rx usb context failed\n", pDevice->dev->name);
		goto free_tx;
	}

	pDevice->FirstRecvFreeList = NULL;
	pDevice->LastRecvFreeList = NULL;
	pDevice->FirstRecvMngList = NULL;
	pDevice->LastRecvMngList = NULL;
	pDevice->NumRecvFreeList = 0;

	pRCB = (PRCB) pDevice->pRCBMem;

	for (ii = 0; ii < pDevice->cbRD; ii++) {
		pDevice->apRCB[ii] = pRCB;
		pRCB->pDevice = (void *) pDevice;
		/* allocate URBs */
		pRCB->pUrb = usb_alloc_urb(0, GFP_ATOMIC);
		if (pRCB->pUrb == NULL) {
			DBG_PRT(MSG_LEVEL_ERR,KERN_ERR" Failed to alloc rx urb\n");
			goto free_rx_tx;
		}
		pRCB->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
		if (pRCB->skb == NULL) {
			DBG_PRT(MSG_LEVEL_ERR,KERN_ERR" Failed to alloc rx skb\n");
			goto free_rx_tx;
		}
		pRCB->skb->dev = pDevice->dev;
		pRCB->bBoolInUse = false;
		/* hand the ready RCB to the receive free list */
		EnqueueRCB(pDevice->FirstRecvFreeList, pDevice->LastRecvFreeList, pRCB);
		pDevice->NumRecvFreeList++;
		pRCB++;
	}

	pDevice->pControlURB = usb_alloc_urb(0, GFP_ATOMIC);
	if (pDevice->pControlURB == NULL) {
		DBG_PRT(MSG_LEVEL_ERR,KERN_ERR"Failed to alloc control urb\n");
		goto free_rx_tx;
	}

	pDevice->pInterruptURB = usb_alloc_urb(0, GFP_ATOMIC);
	if (pDevice->pInterruptURB == NULL) {
		DBG_PRT(MSG_LEVEL_ERR,KERN_ERR"Failed to alloc int urb\n");
		usb_free_urb(pDevice->pControlURB);
		goto free_rx_tx;
	}

	pDevice->intBuf.pDataBuf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
	if (pDevice->intBuf.pDataBuf == NULL) {
		DBG_PRT(MSG_LEVEL_ERR,KERN_ERR"Failed to alloc int buf\n");
		usb_free_urb(pDevice->pControlURB);
		usb_free_urb(pDevice->pInterruptURB);
		goto free_rx_tx;
	}

	return true;

free_rx_tx:
	device_free_rx_bufs(pDevice);

free_tx:
	device_free_tx_bufs(pDevice);

	return false;
}
/*
 * Give every entry of the RX de-fragmentation table a receive skb and
 * mark the whole table as free.  Returns true on success; on failure
 * all skbs allocated so far are released again.
 */
static bool device_init_defrag_cb(struct vnt_private *pDevice)
{
	int idx;

	/* Init the fragment ctl entries */
	for (idx = 0; idx < CB_MAX_RX_FRAG; idx++) {
		PSDeFragControlBlock entry = &(pDevice->sRxDFCB[idx]);

		if (!device_alloc_frag_buf(pDevice, entry)) {
			DBG_PRT(MSG_LEVEL_ERR, KERN_ERR "%s: can not alloc frag bufs\n",
				pDevice->dev->name);
			goto free_frag;
		}
	}

	pDevice->cbDFCB = CB_MAX_RX_FRAG;
	pDevice->cbFreeDFCB = pDevice->cbDFCB;
	return true;

free_frag:
	device_free_frag_bufs(pDevice);
	return false;
}
/* Drop every skb currently held by the de-fragmentation table; entries
 * that were never allocated have a NULL skb and are skipped. */
static void device_free_frag_bufs(struct vnt_private *pDevice)
{
	int idx;

	for (idx = 0; idx < CB_MAX_RX_FRAG; idx++) {
		PSDeFragControlBlock entry = &(pDevice->sRxDFCB[idx]);

		if (entry->skb)
			dev_kfree_skb(entry->skb);
	}
}
/*
 * Attach a fresh receive skb to one de-fragmentation control block.
 * Returns true on success, false if the skb allocation failed.
 * (The former ASSERT(pDeF->skb) was dead code: the NULL case already
 * returned above it.)
 */
int device_alloc_frag_buf(struct vnt_private *pDevice,
	PSDeFragControlBlock pDeF)
{
	pDeF->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
	if (pDeF->skb == NULL)
		return false;

	pDeF->skb->dev = pDevice->dev;

	return true;
}
/*
 * ndo_open: allocate all buffers/URBs, cold-initialize the hardware,
 * start the tasklets and timers and schedule either AP mode or an
 * initial BSS scan.  Returns 0 on success, -ENOMEM on any failure
 * (allocation and register-init failures are both mapped to -ENOMEM).
 */
static int device_open(struct net_device *dev)
{
	struct vnt_private *pDevice = netdev_priv(dev);

	pDevice->fWPA_Authened = false;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " device_open...\n");

	pDevice->rx_buf_sz = MAX_TOTAL_SIZE_WITH_ALL_HEADERS;

	if (device_alloc_bufs(pDevice) == false) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " device_alloc_bufs fail... \n");
		return -ENOMEM;
	}

	if (device_init_defrag_cb(pDevice)== false) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " Initial defragment cb fail \n");
		goto free_rx_tx;
	}

	MP_CLEAR_FLAG(pDevice, fMP_DISCONNECTED);
	MP_CLEAR_FLAG(pDevice, fMP_CONTROL_READS);
	MP_CLEAR_FLAG(pDevice, fMP_CONTROL_WRITES);
	MP_SET_FLAG(pDevice, fMP_POST_READS);
	MP_SET_FLAG(pDevice, fMP_POST_WRITES);

	/* read config file */
	Read_config_file(pDevice);

	if (device_init_registers(pDevice, DEVICE_INIT_COLD) == false) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " init register fail\n");
		goto free_all;
	}

	device_set_multi(pDevice->dev);

	/* init for key management */
	KeyvInitTable(pDevice,&pDevice->sKey);
	/* the real MAC address is known now; replace the probe-time fake */
	memcpy(pDevice->vnt_mgmt.abyMACAddr,
		pDevice->abyCurrentNetAddr, ETH_ALEN);
	memcpy(pDevice->dev->dev_addr, pDevice->abyCurrentNetAddr, ETH_ALEN);
	pDevice->bStopTx0Pkt = false;
	pDevice->bStopDataPkt = false;
	pDevice->bRoaming = false;
	pDevice->bIsRoaming = false;
	pDevice->bEnableRoaming = false;

	if (pDevice->bDiversityRegCtlON) {
		device_init_diversity_timer(pDevice);
	}

	vMgrObjectInit(pDevice);

	tasklet_init(&pDevice->RxMngWorkItem, (void *)RXvMngWorkItem, (unsigned long)pDevice);
	tasklet_init(&pDevice->ReadWorkItem, (void *)RXvWorkItem, (unsigned long)pDevice);
	tasklet_init(&pDevice->EventWorkItem, (void *)INTvWorkItem, (unsigned long)pDevice);

	add_timer(&pDevice->vnt_mgmt.sTimerSecondCallback);

	pDevice->int_interval = 100;  /* max 100 microframes */
	pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;

	pDevice->bIsRxWorkItemQueued = true;
	pDevice->fKillEventPollingThread = false;
	pDevice->bEventAvailable = false;

	pDevice->bWPADEVUp = false;
	pDevice->bwextstep0 = false;
	pDevice->bwextstep1 = false;
	pDevice->bwextstep2 = false;
	pDevice->bwextstep3 = false;
	pDevice->bWPASuppWextEnabled = false;
	pDevice->byReAssocCount = 0;

	/* prime the RX and interrupt pipelines */
	RXvWorkItem(pDevice);
	INTvWorkItem(pDevice);

	/* if WEP key already set by iwconfig but device not yet open */
	if ((pDevice->bEncryptionEnable == true) && (pDevice->bTransmitKey == true)) {
		spin_lock_irq(&pDevice->lock);
		KeybSetDefaultKey( pDevice,
				&(pDevice->sKey),
				pDevice->byKeyIndex | (1 << 31),
				pDevice->uKeyLength,
				NULL,
				pDevice->abyKey,
				KEY_CTL_WEP
				);
		spin_unlock_irq(&pDevice->lock);
		pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
	}

	if (pDevice->vnt_mgmt.eConfigMode == WMAC_CONFIG_AP)
		bScheduleCommand((void *) pDevice, WLAN_CMD_RUN_AP, NULL);
	else
		bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);

	/* TX stays disabled until a link is established */
	netif_stop_queue(pDevice->dev);
	pDevice->flags |= DEVICE_FLAGS_OPENED;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_open success..\n");
	return 0;

free_all:
	device_free_frag_bufs(pDevice);
free_rx_tx:
	device_free_rx_bufs(pDevice);
	device_free_tx_bufs(pDevice);
	device_free_int_bufs(pDevice);
	usb_kill_urb(pDevice->pControlURB);
	usb_kill_urb(pDevice->pInterruptURB);
	usb_free_urb(pDevice->pControlURB);
	usb_free_urb(pDevice->pInterruptURB);

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_open fail.. \n");
	return -ENOMEM;
}
/*
 * ndo_stop: disassociate if a link is up, clear keys and SSID state,
 * stop all timers and tasklets, and release every buffer and URB that
 * device_open() allocated.  Always returns 0 for a valid device.
 */
static int device_close(struct net_device *dev)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	int uu;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close1\n");
	if (pDevice == NULL)
		return -ENODEV;

	if (pDevice->bLinkPass) {
		bScheduleCommand((void *) pDevice, WLAN_CMD_DISASSOCIATE, NULL);
		/* give the command thread time to send the disassoc frame */
		mdelay(30);
	}

	memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
	pMgmt->bShareKeyAlgorithm = false;
	pDevice->bEncryptionEnable = false;
	pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;

	spin_lock_irq(&pDevice->lock);
	for (uu = 0; uu < MAX_KEY_TABLE; uu++)
		MACvDisableKeyEntry(pDevice,uu);
	spin_unlock_irq(&pDevice->lock);

	/* skip MAC shutdown when the hardware is already gone */
	if ((pDevice->flags & DEVICE_FLAGS_UNPLUG) == false) {
		MACbShutdown(pDevice);
	}

	netif_stop_queue(pDevice->dev);

	MP_SET_FLAG(pDevice, fMP_DISCONNECTED);
	MP_CLEAR_FLAG(pDevice, fMP_POST_WRITES);
	MP_CLEAR_FLAG(pDevice, fMP_POST_READS);
	pDevice->fKillEventPollingThread = true;

	del_timer(&pDevice->sTimerCommand);
	del_timer(&pMgmt->sTimerSecondCallback);
	del_timer(&pDevice->sTimerTxData);

	if (pDevice->bDiversityRegCtlON) {
		del_timer(&pDevice->TimerSQ3Tmax1);
		del_timer(&pDevice->TimerSQ3Tmax2);
		del_timer(&pDevice->TimerSQ3Tmax3);
	}

	tasklet_kill(&pDevice->RxMngWorkItem);
	tasklet_kill(&pDevice->ReadWorkItem);
	tasklet_kill(&pDevice->EventWorkItem);

	pDevice->bRoaming = false;
	pDevice->bIsRoaming = false;
	pDevice->bEnableRoaming = false;
	pDevice->bCmdRunning = false;
	pDevice->bLinkPass = false;
	memset(pMgmt->abyCurrBSSID, 0, 6);
	pMgmt->eCurrState = WMAC_STATE_IDLE;

	pDevice->flags &= ~DEVICE_FLAGS_OPENED;

	device_free_tx_bufs(pDevice);
	device_free_rx_bufs(pDevice);
	device_free_int_bufs(pDevice);
	device_free_frag_bufs(pDevice);

	usb_kill_urb(pDevice->pControlURB);
	usb_kill_urb(pDevice->pInterruptURB);
	usb_free_urb(pDevice->pControlURB);
	usb_free_urb(pDevice->pInterruptURB);

	BSSvClearNodeDBTable(pDevice, 0);

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n");
	return 0;
}
/*
 * USB disconnect: detach the private data from the interface, drop the
 * device reference taken at probe time, mark the hardware as unplugged
 * (so the close path skips MAC shutdown) and release the net_device.
 */
static void vt6656_disconnect(struct usb_interface *intf)
{
	struct vnt_private *priv = usb_get_intfdata(intf);

	if (priv == NULL)
		return;

	usb_set_intfdata(intf, NULL);
	usb_put_dev(interface_to_usbdev(intf));

	priv->flags |= DEVICE_FLAGS_UNPLUG;

	if (priv->dev != NULL) {
		unregister_netdev(priv->dev);
		free_netdev(priv->dev);
	}
}
/*
 * Hand one raw 802.11 frame to the DMA0 transmit path under the device
 * lock; while such TX is disabled the frame is silently dropped.
 * Always reports NETDEV_TX_OK.
 */
static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev)
{
	struct vnt_private *priv = netdev_priv(dev);

	spin_lock_irq(&priv->lock);

	if (unlikely(priv->bStopTx0Pkt))
		dev_kfree_skb_irq(skb);
	else
		vDMA0_tx_80211(priv, skb);

	spin_unlock_irq(&priv->lock);

	return NETDEV_TX_OK;
}
/*
 * ndo_start_xmit: queue one data frame.  The netif queue is stopped up
 * front and only re-enabled when the DMA layer accepted the packet;
 * frames are dropped while there is no link or data TX is disabled.
 */
static int device_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vnt_private *priv = netdev_priv(dev);
	struct net_device_stats *dev_stats = &priv->stats;

	spin_lock_irq(&priv->lock);
	netif_stop_queue(dev);

	if (!priv->bLinkPass) {
		dev_kfree_skb_irq(skb);
		goto done;
	}

	if (priv->bStopDataPkt) {
		dev_kfree_skb_irq(skb);
		dev_stats->tx_dropped++;
		goto done;
	}

	if (nsDMA_tx_packet(priv, TYPE_AC0DMA, skb)) {
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	}

done:
	spin_unlock_irq(&priv->lock);
	return NETDEV_TX_OK;
}
static unsigned const ethernet_polynomial = 0x04c11db7U;

/*
 * MSB-first Ethernet CRC-32 over LENGTH bytes of DATA (bits of each
 * byte fed LSB first), init 0xffffffff, no final inversion - the form
 * used for the multicast hash filter.
 *
 * Rewritten with unsigned arithmetic: the previous version left-shifted
 * a negative signed int (`crc << 1` with `crc < 0`), which is undefined
 * behavior in C (C11 6.5.7).  Results are identical; the return type
 * `unsigned int` is what the kernel's u32 resolves to.
 */
static inline unsigned int ether_crc(int length, unsigned char *data)
{
	unsigned int crc = 0xffffffffU;

	while (--length >= 0) {
		unsigned char current_octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
			/* top bit of crc XOR next input bit selects the tap */
			crc = (crc << 1) ^
			      (((crc >> 31) ^ (current_octet & 1)) ?
			       ethernet_polynomial : 0);
		}
	}

	return crc;
}
/* Return a pointer to the first occurrence of str2 within str1,
 * or NULL when str2 does not occur (unsigned-char variant of strstr).
 */
static unsigned char *kstrstr(const unsigned char *str1,
	      const unsigned char *str2) {
	int haystack_len = strlen((const char *)str1);
	const int needle_len = strlen((const char *)str2);

	for (; haystack_len >= needle_len; haystack_len--, str1++) {
		if (memcmp(str1, str2, needle_len) == 0)
			return (unsigned char *) str1;
	}
	return NULL;
}
/* Look up "NAME=value" in the config buffer `source` and copy the value
 * (leading spaces stripped) into `dest`.  Lines whose text before the
 * match contains '#' are treated as comments.  Returns true on success.
 *
 * NOTE: no NUL terminator is appended to `dest`; callers pre-zero it.
 *
 * Fixes vs. the previous version:
 *  - the backward comment scan is bounded at the buffer start (it used
 *    to read before `source` when the match was near the beginning);
 *  - the search no longer skips the first strlen("NAME=") bytes, so a
 *    parameter on the very first line is found;
 *  - the matched line is clamped so buf2[100] cannot overflow.
 */
static int Config_FileGetParameter(unsigned char *string,
				   unsigned char *dest,
				   unsigned char *source)
{
	unsigned char buf1[100];
	unsigned char buf2[100];
	unsigned char *start_p = NULL, *end_p = NULL, *tmp_p = NULL;
	int ii;
	int len;

	/* build the "NAME=" pattern to look for */
	memset(buf1, 0, 100);
	strcat((char *)buf1, (char *)string);
	strcat((char *)buf1, "=");

	/* find target string start point */
	start_p = (unsigned char *)strstr((char *)source, (char *)buf1);
	if (start_p == NULL)
		return false;

	/* check if current config line is marked by "#"; stop at the
	 * buffer start so we never read before `source`
	 */
	for (ii = 1; start_p - ii >= source; ii++) {
		if (*(start_p - ii) == '\n')
			break;
		if (*(start_p - ii) == '#')
			return false;
	}

	/* find target string end point */
	end_p = (unsigned char *)strstr((char *)start_p, "\n");
	if (end_p == NULL) /* can't find "\n", but don't care */
		end_p = start_p + strlen((char *)start_p);

	/* copy the matched line, clamped so buf2 cannot overflow */
	len = end_p - start_p;
	if (len > (int)sizeof(buf2) - 1)
		len = (int)sizeof(buf2) - 1;
	memset(buf2, 0, 100);
	memcpy(buf2, start_p, len);
	buf2[len] = '\0';

	/* find value: everything after the '=' */
	start_p = (unsigned char *)strchr((char *)buf2, '=');
	if (start_p == NULL)
		return false;
	memset(buf1, 0, 100);
	strcpy((char *)buf1, (char *)start_p + 1);

	/* skip leading spaces */
	tmp_p = buf1;
	while (*tmp_p == ' ')
		tmp_p++;

	memcpy(dest, tmp_p, strlen((char *)tmp_p));
	return true;
}
/* if read fails, return NULL, or return data pointer */
static unsigned char *Config_FileOperation(struct vnt_private *pDevice)
{
unsigned char *buffer = kmalloc(1024, GFP_KERNEL);
struct file *file;
if (!buffer) {
printk("allocate mem for file fail?\n");
return NULL;
}
file = filp_open(CONFIG_PATH, O_RDONLY, 0);
if (IS_ERR(file)) {
kfree(buffer);
printk("Config_FileOperation file Not exist\n");
return NULL;
}
if (kernel_read(file, 0, buffer, 1024) < 0) {
printk("read file error?\n");
kfree(buffer);
buffer = NULL;
}
fput(file);
return buffer;
}
/* Parse the on-disk configuration file into pDevice->config_file.
 * Returns 0 on success, -1 when the file cannot be read.  Settings that
 * are absent from the file keep their "not configured" value of -1.
 */
static int Read_config_file(struct vnt_private *pDevice)
{
	unsigned char tmpbuffer[100];
	unsigned char *buffer;

	/* defaults: every setting starts out "not configured" */
	pDevice->config_file.ZoneType = -1;
	pDevice->config_file.eAuthenMode = -1;
	pDevice->config_file.eEncryptionStatus = -1;

	buffer = Config_FileOperation(pDevice);
	if (buffer == NULL)
		return -1;

	/* zone type keyword */
	memset(tmpbuffer, 0, sizeof(tmpbuffer));
	if (Config_FileGetParameter("ZONETYPE", tmpbuffer, buffer) == true) {
		if (memcmp(tmpbuffer, "USA", 3) == 0)
			pDevice->config_file.ZoneType = ZoneType_USA;
		else if (memcmp(tmpbuffer, "JAPAN", 5) == 0)
			pDevice->config_file.ZoneType = ZoneType_Japan;
		else if (memcmp(tmpbuffer, "EUROPE", 6) == 0)
			pDevice->config_file.ZoneType = ZoneType_Europe;
		else
			printk("Unknown Zonetype[%s]?\n", tmpbuffer);
	}

	/* numeric parameters */
	memset(tmpbuffer, 0, sizeof(tmpbuffer));
	if (Config_FileGetParameter("AUTHENMODE", tmpbuffer, buffer) == true)
		pDevice->config_file.eAuthenMode =
			(int) simple_strtol(tmpbuffer, NULL, 10);

	memset(tmpbuffer, 0, sizeof(tmpbuffer));
	if (Config_FileGetParameter("ENCRYPTIONMODE", tmpbuffer, buffer) == true)
		pDevice->config_file.eEncryptionStatus =
			(int) simple_strtol(tmpbuffer, NULL, 10);

	kfree(buffer);
	return 0;
}
/* ndo_set_rx_mode handler: program the adapter's RX filter
 * (promiscuous / all-multicast / hashed multicast) from the
 * net_device flags and multicast address list.
 */
static void device_set_multi(struct net_device *dev)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	struct netdev_hw_addr *ha;
	u32 mc_filter[2];
	int ii;
	u8 pbyData[8] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	u8 byTmpMode = 0;
	int rc;

	spin_lock_irq(&pDevice->lock);

	/* read the current RX control register from the MAC */
	rc = CONTROLnsRequestIn(pDevice,
				MESSAGE_TYPE_READ,
				MAC_REG_RCR,
				MESSAGE_REQUEST_MACREG,
				1,
				&byTmpMode
				);
	if (rc == 0) pDevice->byRxMode = byTmpMode;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byRxMode in= %x\n", pDevice->byRxMode);

	if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
		DBG_PRT(MSG_LEVEL_ERR,KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		/* unconditionally log net taps */
		pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST);
	}
	else if ((netdev_mc_count(dev) > pDevice->multicast_limit) ||
		 (dev->flags & IFF_ALLMULTI)) {
		/* too many addresses (or all-multi requested): open the
		 * multicast address registers completely (all 0xff)
		 */
		CONTROLnsRequestOut(pDevice,
				    MESSAGE_TYPE_WRITE,
				    MAC_REG_MAR0,
				    MESSAGE_REQUEST_MACREG,
				    8,
				    pbyData
				    );
		pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
	}
	else {
		/* build a 64-bit hash filter from the CRC of each
		 * multicast address (top 6 CRC bits select the bit)
		 */
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
			mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
		}
		/* write the 8 filter bytes to MAR0..MAR7 */
		for (ii = 0; ii < 4; ii++) {
			MACvWriteMultiAddr(pDevice, ii, *((u8 *)&mc_filter[0] + ii));
			MACvWriteMultiAddr(pDevice, ii+ 4, *((u8 *)&mc_filter[1] + ii));
		}
		pDevice->byRxMode &= ~(RCR_UNICAST);
		pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
	}

	if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
		/*
		 * If AP mode, don't enable RCR_UNICAST since HW only compares
		 * addr1 with local MAC
		 */
		pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
		pDevice->byRxMode &= ~(RCR_UNICAST);
	}

	/* write the updated mode back to the RX control register */
	ControlvWriteByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_RCR, pDevice->byRxMode);

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byRxMode out= %x\n", pDevice->byRxMode);

	spin_unlock_irq(&pDevice->lock);
}
/* ndo_get_stats handler: hand back the driver's statistics block. */
static struct net_device_stats *device_get_stats(struct net_device *dev)
{
	struct vnt_private *priv = netdev_priv(dev);

	return &priv->stats;
}
/* ndo_do_ioctl handler: dispatch hostapd and ethtool private ioctls.
 * Returns 0 or a negative errno.
 */
static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct iwreq *wrq = (struct iwreq *) rq;
	int rc = 0;

	switch (cmd) {
	case IOCTL_CMD_HOSTAPD:
		/* refuse hostapd requests until the interface is opened.
		 * The old code assigned -EFAULT here but then fell through
		 * and unconditionally called the ioctl anyway, so the
		 * error was a dead store.
		 */
		if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) {
			rc = -EFAULT;
			break;
		}
		rc = vt6656_hostap_ioctl(pDevice, &wrq->u.data);
		break;

	case SIOCETHTOOL:
		return ethtool_ioctl(dev, (void *) rq->ifr_data);
	}

	return rc;
}
/* Handle SIOCETHTOOL sub-commands; only ETHTOOL_GDRVINFO is supported.
 * Returns 0, -EFAULT on bad user pointers, or -EOPNOTSUPP.
 *
 * Fix: "&ethcmd" had been corrupted to the mangled token "ðcmd"
 * (an HTML-entity mangling of the address-of expression).
 */
static int ethtool_ioctl(struct net_device *dev, void *useraddr)
{
	u32 ethcmd;

	/* fetch the ethtool sub-command from user space */
	if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
		return -EFAULT;

	switch (ethcmd) {
	case ETHTOOL_GDRVINFO: {
		struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};

		strncpy(info.driver, DEVICE_NAME, sizeof(info.driver)-1);
		strncpy(info.version, DEVICE_VERSION, sizeof(info.version)-1);
		if (copy_to_user(useraddr, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}
	}

	return -EOPNOTSUPP;
}
MODULE_DEVICE_TABLE(usb, vt6656_table);

/* USB driver glue: binds probe/disconnect (and PM hooks when enabled)
 * to the device IDs in vt6656_table.
 */
static struct usb_driver vt6656_driver = {
	.name = DEVICE_NAME,
	.probe = vt6656_probe,
	.disconnect = vt6656_disconnect,
	.id_table = vt6656_table,
#ifdef CONFIG_PM
	.suspend = vt6656_suspend,
	.resume = vt6656_resume,
#endif /* CONFIG_PM */
};

module_usb_driver(vt6656_driver);
| gpl-2.0 |
wenfengliaoshuzhai/linux | crypto/async_tx/async_tx.c | 2398 | 7841 | /*
* core routines for the asynchronous memory transfer/transform api
*
* Copyright © 2006, Intel Corporation.
*
* Dan Williams <dan.j.williams@intel.com>
*
* with architecture considerations by:
* Neil Brown <neilb@suse.de>
* Jeff Garzik <jeff@garzik.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <linux/rculist.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/async_tx.h>
#ifdef CONFIG_DMA_ENGINE
/* Module init: take a reference on the dmaengine so channels remain
 * available to the async_tx api.
 */
static int __init async_tx_init(void)
{
	async_dmaengine_get();

	printk(KERN_INFO "async_tx: api initialized (async)\n");

	return 0;
}
/* Module exit: release the dmaengine reference taken at init. */
static void __exit async_tx_exit(void)
{
	async_dmaengine_put();
}

module_init(async_tx_init);
module_exit(async_tx_exit);
/**
* __async_tx_find_channel - find a channel to carry out the operation or let
* the transaction execute synchronously
* @submit: transaction dependency and submission modifiers
* @tx_type: transaction type
*/
struct dma_chan *
__async_tx_find_channel(struct async_submit_ctl *submit,
			enum dma_transaction_type tx_type)
{
	struct dma_async_tx_descriptor *parent = submit->depend_tx;

	/* prefer staying on the dependency's channel when it also
	 * supports the requested transaction type
	 */
	if (parent && dma_has_cap(tx_type, parent->chan->device->cap_mask))
		return parent->chan;

	return async_dma_find_channel(tx_type);
}
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
#endif
/**
* async_tx_channel_switch - queue an interrupt descriptor with a dependency
* pre-attached.
* @depend_tx: the operation that must finish before the new operation runs
* @tx: the new operation
*/
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
			struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan = depend_tx->chan;
	struct dma_device *device = chan->device;
	/* sentinel (void *)~0 means "no interrupt descriptor decided yet";
	 * it is overwritten to NULL once tx is chained under the lock
	 */
	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

	/* first check to see if we can still append to depend_tx */
	txd_lock(depend_tx);
	if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
		txd_chain(depend_tx, tx);
		intr_tx = NULL;
	}
	txd_unlock(depend_tx);

	/* attached dependency, flush the parent channel */
	if (!intr_tx) {
		device->device_issue_pending(chan);
		return;
	}

	/* see if we can schedule an interrupt
	 * otherwise poll for completion
	 */
	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		intr_tx = device->device_prep_dma_interrupt(chan, 0);
	else
		intr_tx = NULL;

	if (intr_tx) {
		intr_tx->callback = NULL;
		intr_tx->callback_param = NULL;
		/* safe to chain outside the lock since we know we are
		 * not submitted yet
		 */
		txd_chain(intr_tx, tx);

		/* check if we need to append */
		txd_lock(depend_tx);
		if (txd_parent(depend_tx)) {
			txd_chain(depend_tx, intr_tx);
			async_tx_ack(intr_tx);
			intr_tx = NULL;
		}
		txd_unlock(depend_tx);

		/* depend_tx already completed under the lock: submit the
		 * interrupt descriptor directly
		 */
		if (intr_tx) {
			txd_clear_parent(intr_tx);
			intr_tx->tx_submit(intr_tx);
			async_tx_ack(intr_tx);
		}
		device->device_issue_pending(chan);
	} else {
		/* no interrupt capability: block until the dependency is
		 * complete, then submit
		 */
		if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
			panic("%s: DMA error waiting for depend_tx\n",
			      __func__);
		tx->tx_submit(tx);
	}
}
/**
* submit_disposition - flags for routing an incoming operation
* @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
* @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
* @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
*
* while holding depend_tx->lock we must avoid submitting new operations
* to prevent a circular locking dependency with drivers that already
* hold a channel lock when calling async_tx_run_dependencies.
*/
enum submit_disposition {
	ASYNC_TX_SUBMITTED,	/* appended under the lock; nothing else to do */
	ASYNC_TX_CHANNEL_SWITCH, /* route through async_tx_channel_switch() */
	ASYNC_TX_DIRECT_SUBMIT,	/* submit once the lock has been dropped */
};
/**
 * async_tx_submit - attach callback data and submit @tx, honoring any
 *	dependency recorded in @submit
 * @chan: channel @tx was allocated on
 * @tx: descriptor to submit
 * @submit: submission modifiers (callback, flags, optional depend_tx)
 *
 * Depending on whether the dependency is still in flight and on which
 * channel it lives, @tx is chained behind it, submitted directly, or
 * routed through an interrupt descriptor (channel switch).
 */
void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
		struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

	tx->callback = submit->cb_fn;
	tx->callback_param = submit->cb_param;

	if (depend_tx) {
		enum submit_disposition s;

		/* sanity check the dependency chain:
		 * 1/ if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 * 2/ dependencies are 1:1 i.e. two transactions can
		 * not depend on the same parent
		 */
		BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) ||
		       txd_parent(tx));

		/* the lock prevents async_tx_run_dependencies from missing
		 * the setting of ->next when ->parent != NULL
		 */
		txd_lock(depend_tx);
		if (txd_parent(depend_tx)) {
			/* we have a parent so we can not submit directly
			 * if we are staying on the same channel: append
			 * else: channel switch
			 */
			if (depend_tx->chan == chan) {
				txd_chain(depend_tx, tx);
				s = ASYNC_TX_SUBMITTED;
			} else
				s = ASYNC_TX_CHANNEL_SWITCH;
		} else {
			/* we do not have a parent so we may be able to submit
			 * directly if we are staying on the same channel
			 */
			if (depend_tx->chan == chan)
				s = ASYNC_TX_DIRECT_SUBMIT;
			else
				s = ASYNC_TX_CHANNEL_SWITCH;
		}
		txd_unlock(depend_tx);

		/* act on the disposition decided under the lock; submitting
		 * here (not under the lock) avoids a circular locking
		 * dependency with drivers (see comment above the enum)
		 */
		switch (s) {
		case ASYNC_TX_SUBMITTED:
			break;
		case ASYNC_TX_CHANNEL_SWITCH:
			async_tx_channel_switch(depend_tx, tx);
			break;
		case ASYNC_TX_DIRECT_SUBMIT:
			txd_clear_parent(tx);
			tx->tx_submit(tx);
			break;
		}
	} else {
		txd_clear_parent(tx);
		tx->tx_submit(tx);
	}

	if (submit->flags & ASYNC_TX_ACK)
		async_tx_ack(tx);

	if (depend_tx)
		async_tx_ack(depend_tx);
}
EXPORT_SYMBOL_GPL(async_tx_submit);
/**
* async_trigger_callback - schedules the callback function to be run
* @submit: submission and completion parameters
*
* honored flags: ASYNC_TX_ACK
*
* The callback is run after any dependent operations have completed.
*/
struct dma_async_tx_descriptor *
async_trigger_callback(struct async_submit_ctl *submit)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *tx;
	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

	if (depend_tx) {
		chan = depend_tx->chan;
		device = chan->device;

		/* see if we can schedule an interrupt
		 * otherwise poll for completion
		 */
		if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
			device = NULL;

		tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
	} else
		tx = NULL;

	if (tx) {
		/* async path: the callback fires from the interrupt
		 * descriptor's completion
		 */
		pr_debug("%s: (async)\n", __func__);

		async_tx_submit(chan, tx, submit);
	} else {
		/* sync path: wait for the dependency, then run the
		 * callback inline
		 */
		pr_debug("%s: (sync)\n", __func__);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		async_tx_sync_epilog(submit);
	}

	return tx;
}
EXPORT_SYMBOL_GPL(async_trigger_callback);
/**
* async_tx_quiesce - ensure tx is complete and freeable upon return
* @tx - transaction to quiesce
*/
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
	if (*tx) {
		/* if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 */
		BUG_ON(async_tx_test_ack(*tx));
		/* block until the transaction completes, then ack and
		 * clear the caller's pointer
		 */
		if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)
			panic("%s: DMA error waiting for transaction\n",
			      __func__);
		async_tx_ack(*tx);
		*tx = NULL;
	}
}
EXPORT_SYMBOL_GPL(async_tx_quiesce);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL");
| gpl-2.0 |
yevmel/P8000-Kernel | drivers/media/v4l2-core/v4l2-event.c | 2654 | 7641 | /*
* v4l2-event.c
*
* V4L2 events.
*
* Copyright (C) 2009--2010 Nokia Corporation.
*
* Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
/* Translate a logical event index into its slot in the sev->events
 * circular buffer (which starts at sev->first and wraps at sev->elems).
 */
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	unsigned pos = sev->first + idx;

	return pos >= sev->elems ? pos - sev->elems : pos;
}
/* Pop the oldest available event from the filehandle into *event.
 * Returns -ENOENT when no event is pending.  Takes fh->vdev->fh_lock.
 */
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	/* report how many more events are still queued */
	kev->event.pending = fh->navailable;
	*event = kev->event;

	/* release the slot back to the subscription's ring */
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}
/* Dequeue one event, optionally blocking until one arrives.
 * The vdev serialization lock is released while sleeping so other
 * file operations can proceed.
 */
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		/* retry on -ENOENT: another reader may have raced us */
		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
/* Caller must hold fh->vdev->fh_lock!
 * Return the subscription matching (type, id), or NULL if none exists.
 */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *entry;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(entry, &fh->subscribed, list) {
		if (entry->type == type && entry->id == id)
			return entry;
	}

	return NULL;
}
/* Queue one event on a single filehandle, if it is subscribed.
 * When the subscription's ring is full the oldest event is dropped
 * (or merged/replaced via the subscription's ops).
 * Caller must hold fh->vdev->fh_lock.
 */
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/*
	 * If the event has been added to the fh->subscribed list, but its
	 * add op has not completed yet elems will be 0, treat this as
	 * not being subscribed.
	 */
	if (!sev->elems)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			/* single-slot ring: let the owner fold the new
			 * event into the one being replaced
			 */
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			/* fold the dropped event into the next-oldest one */
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}
/* Queue an event on every filehandle of the video device.
 *
 * Fix: "&timestamp" had been corrupted to the mangled token "×tamp"
 * (an HTML-entity mangling of the address-of expression) in both the
 * ktime_get_ts() call and the per-fh queueing call.
 */
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	/* one shared timestamp for every filehandle receiving this event */
	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, &timestamp);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);
/* Queue an event on one specific filehandle.
 *
 * Fix: "&timestamp" had been corrupted to the mangled token "×tamp"
 * (an HTML-entity mangling of the address-of expression).
 */
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, &timestamp);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);
/* Number of events currently queued on this filehandle (poll helper). */
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
/* Subscribe the filehandle to events of (sub->type, sub->id) with a ring
 * of at least one kevent slot.  Subscribing twice is a no-op.
 * Returns 0, -EINVAL for V4L2_EVENT_ALL, -ENOMEM, or the add op's error.
 */
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	/* allocate the subscription together with its ring of kevents */
	sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		kfree(sev);
		return 0; /* Already listening */
	}

	if (sev->ops && sev->ops->add) {
		int ret = sev->ops->add(sev, elems);
		if (ret) {
			sev->ops = NULL;
			v4l2_event_unsubscribe(fh, sub);
			return ret;
		}
	}

	/* Mark as ready for use (elems == 0 means "add op still running"
	 * to the queueing path)
	 */
	sev->elems = elems;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
/* Drop every subscription on the filehandle, one at a time.  The lock
 * is only held while peeking at the list; the actual unsubscribe is
 * done outside it.
 */
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					       struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);
/* Remove one subscription (or all of them for V4L2_EVENT_ALL), discard
 * its pending events, notify the owner via the del op, and free it.
 * Always returns 0.
 */
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;
	int i;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL) {
		/* Remove any pending events for this subscription */
		for (i = 0; i < sev->in_use; i++) {
			list_del(&sev->events[sev_pos(sev, i)].list);
			fh->navailable--;
		}
		list_del(&sev->list);
	}

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	/* notify the owner after the lock has been dropped */
	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
/* Subdev unsubscribe helper: simply forwards to v4l2_event_unsubscribe()
 * (the subdev argument is unused here).
 */
int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);
| gpl-2.0 |
basr/Brieuwers_Kernel | drivers/hid/hid-emsff.c | 2654 | 3915 | /*
* Force feedback support for EMS Trio Linker Plus II
*
* Copyright (c) 2010 Ignaz Forster <ignaz.forster@gmx.de>
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/usb.h>
#include "hid-ids.h"
#include "usbhid/usbhid.h"
/* Per-device force-feedback state: the output report used for rumble. */
struct emsff_device {
	struct hid_report *report;
};
/* Force-feedback "play" callback: scale the 16-bit rumble magnitudes
 * down to the device's 8-bit range and send them in the output report.
 */
static int emsff_play(struct input_dev *dev, void *data,
				 struct ff_effect *effect)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct emsff_device *emsff = data;
	int weak, strong;

	weak = effect->u.rumble.weak_magnitude;
	strong = effect->u.rumble.strong_magnitude;
	dbg_hid("called with 0x%04x 0x%04x\n", strong, weak);

	/* map 0..0xffff onto 0..0xff */
	weak = weak * 0xff / 0xffff;
	strong = strong * 0xff / 0xffff;

	/* value[1] = weak motor, value[2] = strong motor */
	emsff->report->field[0]->value[1] = weak;
	emsff->report->field[0]->value[2] = strong;

	dbg_hid("running with 0x%02x 0x%02x\n", strong, weak);
	usbhid_submit_report(hid, emsff->report, USB_DIR_OUT);

	return 0;
}
/* Find and validate the device's first output report, register a
 * memless FF_RUMBLE device, and send an initial "motors off" report.
 * Returns 0 on success or a negative errno.
 */
static int emsff_init(struct hid_device *hid)
{
	struct emsff_device *emsff;
	struct hid_report *report;
	struct hid_input *hidinput = list_first_entry(&hid->inputs,
						      struct hid_input, list);
	struct list_head *report_list =
			&hid->report_enum[HID_OUTPUT_REPORT].report_list;
	struct input_dev *dev = hidinput->input;
	int error;

	if (list_empty(report_list)) {
		hid_err(hid, "no output reports found\n");
		return -ENODEV;
	}

	report = list_first_entry(report_list, struct hid_report, list);
	if (report->maxfield < 1) {
		hid_err(hid, "no fields in the report\n");
		return -ENODEV;
	}

	/* the rumble protocol below writes 7 values into field 0 */
	if (report->field[0]->report_count < 7) {
		hid_err(hid, "not enough values in the field\n");
		return -ENODEV;
	}

	emsff = kzalloc(sizeof(struct emsff_device), GFP_KERNEL);
	if (!emsff)
		return -ENOMEM;

	set_bit(FF_RUMBLE, dev->ffbit);

	error = input_ff_create_memless(dev, emsff, emsff_play);
	if (error) {
		kfree(emsff);
		return error;
	}

	/* initial report: command byte 0x01, all magnitudes zero */
	emsff->report = report;
	emsff->report->field[0]->value[0] = 0x01;
	emsff->report->field[0]->value[1] = 0x00;
	emsff->report->field[0]->value[2] = 0x00;
	emsff->report->field[0]->value[3] = 0x00;
	emsff->report->field[0]->value[4] = 0x00;
	emsff->report->field[0]->value[5] = 0x00;
	emsff->report->field[0]->value[6] = 0x00;
	usbhid_submit_report(hid, emsff->report, USB_DIR_OUT);

	hid_info(hid, "force feedback for EMS based devices by Ignaz Forster <ignaz.forster@gmx.de>\n");

	return 0;
}
/* HID probe: parse the descriptor, start the device without the default
 * FF connect flag, then attach our own force-feedback support.
 */
static int ems_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret = hid_parse(hdev);

	if (ret) {
		hid_err(hdev, "parse failed\n");
		return ret;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
	if (ret) {
		hid_err(hdev, "hw start failed\n");
		return ret;
	}

	/* FF setup failure is not fatal for the probe */
	emsff_init(hdev);

	return 0;
}
/* HID device IDs handled by this driver */
static const struct hid_device_id ems_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_EMS, 0x118) },
	{ }
};
MODULE_DEVICE_TABLE(hid, ems_devices);

/* HID driver glue binding ems_probe() to the IDs above */
static struct hid_driver ems_driver = {
	.name = "hkems",
	.id_table = ems_devices,
	.probe = ems_probe,
};

/* module load/unload: register/unregister the HID driver */
static int ems_init(void)
{
	return hid_register_driver(&ems_driver);
}

static void ems_exit(void)
{
	hid_unregister_driver(&ems_driver);
}

module_init(ems_init);
module_exit(ems_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
zarboz/m8wlv | drivers/net/can/usb/peak_usb/pcan_usb.c | 3422 | 20217 | /*
* CAN driver for PEAK System PCAN-USB adapter
* Derived from the PCAN project file driver/src/pcan_usb.c
*
* Copyright (C) 2003-2010 PEAK System-Technik GmbH
* Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published
* by the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/netdevice.h>
#include <linux/usb.h>
#include <linux/module.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include "pcan_usb_core.h"
MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter");
/* PCAN-USB Endpoints */
#define PCAN_USB_EP_CMDOUT 1
#define PCAN_USB_EP_CMDIN (PCAN_USB_EP_CMDOUT | USB_DIR_IN)
#define PCAN_USB_EP_MSGOUT 2
#define PCAN_USB_EP_MSGIN (PCAN_USB_EP_MSGOUT | USB_DIR_IN)
/* PCAN-USB command struct */
#define PCAN_USB_CMD_FUNC 0
#define PCAN_USB_CMD_NUM 1
#define PCAN_USB_CMD_ARGS 2
#define PCAN_USB_CMD_ARGS_LEN 14
#define PCAN_USB_CMD_LEN (PCAN_USB_CMD_ARGS + \
PCAN_USB_CMD_ARGS_LEN)
/* PCAN-USB command timeout (ms.) */
#define PCAN_USB_COMMAND_TIMEOUT 1000
/* PCAN-USB startup timeout (ms.) */
#define PCAN_USB_STARTUP_TIMEOUT 10
/* PCAN-USB rx/tx buffers size */
#define PCAN_USB_RX_BUFFER_SIZE 64
#define PCAN_USB_TX_BUFFER_SIZE 64
#define PCAN_USB_MSG_HEADER_LEN 2
/* PCAN-USB adapter internal clock (MHz) */
#define PCAN_USB_CRYSTAL_HZ 16000000
/* PCAN-USB USB message record status/len field */
#define PCAN_USB_STATUSLEN_TIMESTAMP (1 << 7)
#define PCAN_USB_STATUSLEN_INTERNAL (1 << 6)
#define PCAN_USB_STATUSLEN_EXT_ID (1 << 5)
#define PCAN_USB_STATUSLEN_RTR (1 << 4)
#define PCAN_USB_STATUSLEN_DLC (0xf)
/* PCAN-USB error flags */
#define PCAN_USB_ERROR_TXFULL 0x01
#define PCAN_USB_ERROR_RXQOVR 0x02
#define PCAN_USB_ERROR_BUS_LIGHT 0x04
#define PCAN_USB_ERROR_BUS_HEAVY 0x08
#define PCAN_USB_ERROR_BUS_OFF 0x10
#define PCAN_USB_ERROR_RXQEMPTY 0x20
#define PCAN_USB_ERROR_QOVR 0x40
#define PCAN_USB_ERROR_TXQFULL 0x80
/* SJA1000 modes */
#define SJA1000_MODE_NORMAL 0x00
#define SJA1000_MODE_INIT 0x01
/*
* tick duration = 42.666 us =>
* (tick_number * 44739243) >> 20 ~ (tick_number * 42666) / 1000
* accuracy = 10^-7
*/
#define PCAN_USB_TS_DIV_SHIFTER 20
#define PCAN_USB_TS_US_PER_TICK 44739243
/* PCAN-USB messages record types */
#define PCAN_USB_REC_ERROR 1
#define PCAN_USB_REC_ANALOG 2
#define PCAN_USB_REC_BUSLOAD 3
#define PCAN_USB_REC_TS 4
#define PCAN_USB_REC_BUSEVT 5
/* private to PCAN-USB adapter */
struct pcan_usb {
	struct peak_usb_device dev;	/* must be first: container_of() is used */
	struct peak_time_ref time_ref;	/* adapter-clock to host-time mapping */
	struct timer_list restart_timer; /* delays restart completion */
};
/* incoming message context for decoding */
struct pcan_usb_msg_context {
	u16 ts16;		/* last 16-bit timestamp read from the packet */
	u8 prev_ts8;
	u8 *ptr;		/* current decode position */
	u8 *end;		/* one past the last byte of the packet */
	u8 rec_cnt;
	u8 rec_idx;		/* index of the record being decoded */
	u8 rec_data_idx;
	struct net_device *netdev;
	struct pcan_usb *pdev;
};
/*
* send a command
*/
/* Send command (function f, number n, optional args p) to the adapter
 * over the bulk command endpoint.  Returns 0 or a negative USB error.
 */
static int pcan_usb_send_cmd(struct peak_usb_device *dev, u8 f, u8 n, u8 *p)
{
	int err;
	int actual_length;

	/* usb device unregistered? */
	if (!(dev->state & PCAN_USB_STATE_CONNECTED))
		return 0;

	/* build the command record: function, number, optional args */
	dev->cmd_buf[PCAN_USB_CMD_FUNC] = f;
	dev->cmd_buf[PCAN_USB_CMD_NUM] = n;

	if (p)
		memcpy(dev->cmd_buf + PCAN_USB_CMD_ARGS,
			p, PCAN_USB_CMD_ARGS_LEN);

	err = usb_bulk_msg(dev->udev,
			usb_sndbulkpipe(dev->udev, PCAN_USB_EP_CMDOUT),
			dev->cmd_buf, PCAN_USB_CMD_LEN, &actual_length,
			PCAN_USB_COMMAND_TIMEOUT);
	if (err)
		netdev_err(dev->netdev,
			"sending cmd f=0x%x n=0x%x failure: %d\n",
			f, n, err);
	return err;
}
/*
* send a command then wait for its response
*/
/* Send command (f, n) with no args, then read its response and copy the
 * returned args into p (when non-NULL).  Returns 0 or a negative error.
 */
static int pcan_usb_wait_rsp(struct peak_usb_device *dev, u8 f, u8 n, u8 *p)
{
	int err;
	int actual_length;

	/* usb device unregistered? */
	if (!(dev->state & PCAN_USB_STATE_CONNECTED))
		return 0;

	/* first, send command */
	err = pcan_usb_send_cmd(dev, f, n, NULL);
	if (err)
		return err;

	/* then read the answer back from the command-in endpoint */
	err = usb_bulk_msg(dev->udev,
		usb_rcvbulkpipe(dev->udev, PCAN_USB_EP_CMDIN),
		dev->cmd_buf, PCAN_USB_CMD_LEN, &actual_length,
		PCAN_USB_COMMAND_TIMEOUT);
	if (err)
		netdev_err(dev->netdev,
			"waiting rsp f=0x%x n=0x%x failure: %d\n", f, n, err);
	else if (p)
		memcpy(p, dev->cmd_buf + PCAN_USB_CMD_ARGS,
			PCAN_USB_CMD_ARGS_LEN);

	return err;
}
/* Put the SJA1000 CAN controller into the given mode (init/normal). */
static int pcan_usb_set_sja1000(struct peak_usb_device *dev, u8 mode)
{
	u8 args[PCAN_USB_CMD_ARGS_LEN] = {
		[1] = mode,
	};

	return pcan_usb_send_cmd(dev, 9, 2, args);
}
/* Connect (onoff != 0) or disconnect the adapter from the CAN bus. */
static int pcan_usb_set_bus(struct peak_usb_device *dev, u8 onoff)
{
	u8 args[PCAN_USB_CMD_ARGS_LEN] = {
		[0] = !!onoff,
	};

	return pcan_usb_send_cmd(dev, 3, 2, args);
}
/* Enable/disable silent (listen-only) mode on the adapter. */
static int pcan_usb_set_silent(struct peak_usb_device *dev, u8 onoff)
{
	u8 args[PCAN_USB_CMD_ARGS_LEN] = {
		[0] = !!onoff,
	};

	return pcan_usb_send_cmd(dev, 3, 3, args);
}
/* Switch the adapter's external VCC output on or off. */
static int pcan_usb_set_ext_vcc(struct peak_usb_device *dev, u8 onoff)
{
	u8 args[PCAN_USB_CMD_ARGS_LEN] = {
		[0] = !!onoff,
	};

	return pcan_usb_send_cmd(dev, 10, 2, args);
}
/*
* set bittiming value to can
*/
/* Pack the CAN bittiming into SJA1000-style BTR0/BTR1 register values
 * and send them to the adapter.
 * BTR0: brp-1 in bits 5..0, sjw-1 in bits 7..6.
 * BTR1: tseg1-1 in bits 3..0, tseg2-1 in bits 6..4, triple-sampling bit 7.
 */
static int pcan_usb_set_bittiming(struct peak_usb_device *dev,
				  struct can_bittiming *bt)
{
	u8 args[PCAN_USB_CMD_ARGS_LEN];
	u8 btr0, btr1;

	btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
	btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
		(((bt->phase_seg2 - 1) & 0x7) << 4);
	if (dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
		btr1 |= 0x80;

	netdev_info(dev->netdev, "setting BTR0=0x%02x BTR1=0x%02x\n",
		btr0, btr1);

	/* note: the device expects BTR1 first, then BTR0 */
	args[0] = btr1;
	args[1] = btr0;

	return pcan_usb_send_cmd(dev, 1, 2, args);
}
/*
* init/reset can
*/
/* Bring the CAN interface up (onoff != 0) or down.  Going down also
 * puts the SJA1000 back into init mode; going up sleeps briefly because
 * the adapter needs time to initialize.
 */
static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff)
{
	int err;

	err = pcan_usb_set_bus(dev, onoff);
	if (err)
		return err;

	if (!onoff) {
		err = pcan_usb_set_sja1000(dev, SJA1000_MODE_INIT);
	} else {
		/* the PCAN-USB needs time to init */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT));
	}

	return err;
}
/*
* handle end of waiting for the device to reset
*/
/* Timer callback fired when the post-restart delay expires. */
static void pcan_usb_restart(unsigned long arg)
{
	/* notify candev and netdev */
	peak_usb_restart_complete((struct peak_usb_device *)arg);
}
/*
* handle the submission of the restart urb
*/
/* URB completion for the async restart command: arm the timer that
 * defers the restart-complete notification, then release the urb.
 */
static void pcan_usb_restart_pending(struct urb *urb)
{
	struct pcan_usb *pdev = urb->context;

	/* the PCAN-USB needs time to restart */
	mod_timer(&pdev->restart_timer,
			jiffies + msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT));

	/* can delete usb resources */
	peak_usb_async_complete(urb);
}
/*
 * Handle asynchronous restart of the adapter.
 *
 * Fills @buf with a "set bus on" command (function 3, number 2, arg 1)
 * and submits it on the CMDOUT bulk endpoint with GFP_ATOMIC (callable
 * from atomic context). Completion is handled by
 * pcan_usb_restart_pending(), which arms the restart timer.
 *
 * Returns -EBUSY if a restart is already in flight (timer pending),
 * otherwise the usb_submit_urb() result.
 */
static int pcan_usb_restart_async(struct peak_usb_device *dev, struct urb *urb,
				  u8 *buf)
{
	struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev);

	/* a previous restart is still waiting for its timer */
	if (timer_pending(&pdev->restart_timer))
		return -EBUSY;

	/* set bus on */
	buf[PCAN_USB_CMD_FUNC] = 3;
	buf[PCAN_USB_CMD_NUM] = 2;
	buf[PCAN_USB_CMD_ARGS] = 1;

	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev, PCAN_USB_EP_CMDOUT),
			  buf, PCAN_USB_CMD_LEN,
			  pcan_usb_restart_pending, pdev);

	return usb_submit_urb(urb, GFP_ATOMIC);
}
/*
 * Read the adapter serial number (command 6/1). The 32-bit value is
 * little-endian on the wire. On failure a netdev error is logged and
 * the error code returned; *serial_number is only written on success.
 */
static int pcan_usb_get_serial(struct peak_usb_device *dev, u32 *serial_number)
{
	u8 rsp[PCAN_USB_CMD_ARGS_LEN];
	int err;

	err = pcan_usb_wait_rsp(dev, 6, 1, rsp);
	if (err) {
		netdev_err(dev->netdev, "getting serial failure: %d\n", err);
		return err;
	}

	if (serial_number) {
		u32 raw;

		memcpy(&raw, rsp, 4);
		*serial_number = le32_to_cpu(raw);
	}

	return err;
}
/*
 * Read the one-byte device id (command 4/1). *device_id is only
 * written on success; errors are logged and propagated.
 */
static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id)
{
	u8 rsp[PCAN_USB_CMD_ARGS_LEN];
	int err;

	err = pcan_usb_wait_rsp(dev, 4, 1, rsp);
	if (err) {
		netdev_err(dev->netdev, "getting device id failure: %d\n", err);
	} else if (device_id) {
		*device_id = rsp[0];
	}

	return err;
}
/*
 * Refresh the time reference from a 16-bit little-endian timestamp in the
 * record stream. The first record of a message (rec_idx == 0) re-anchors
 * the reference, later ones only update it. Note: mc->ptr is NOT advanced
 * here; the caller accounts for the record length.
 */
static int pcan_usb_update_ts(struct pcan_usb_msg_context *mc)
{
	u16 raw_ts;

	if (mc->end < (mc->ptr + 2))
		return -EINVAL;

	memcpy(&raw_ts, mc->ptr, 2);
	mc->ts16 = le16_to_cpu(raw_ts);

	if (mc->rec_idx > 0)
		peak_usb_update_ts_now(&mc->pdev->time_ref, mc->ts16);
	else
		peak_usb_set_ts_now(&mc->pdev->time_ref, mc->ts16);

	return 0;
}
/*
 * decode received timestamp
 *
 * Only the first packet of a message carries a full 16-bit little-endian
 * timestamp; later packets carry just its low byte. Reconstruct mc->ts16
 * from the running prev_ts8, handling the 8-bit wrap-around.
 * Returns 0, or -EINVAL if the record is truncated.
 */
static int pcan_usb_decode_ts(struct pcan_usb_msg_context *mc, u8 first_packet)
{
	/* only 1st packet supplies a word timestamp */
	if (first_packet) {
		u16 tmp16;

		if ((mc->ptr + 2) > mc->end)
			return -EINVAL;

		memcpy(&tmp16, mc->ptr, 2);
		mc->ptr += 2;

		mc->ts16 = le16_to_cpu(tmp16);
		mc->prev_ts8 = mc->ts16 & 0x00ff;
	} else {
		u8 ts8;

		if ((mc->ptr + 1) > mc->end)
			return -EINVAL;

		ts8 = *mc->ptr++;

		/* low byte wrapped: carry into the high byte */
		if (ts8 < mc->prev_ts8)
			mc->ts16 += 0x100;

		mc->ts16 &= 0xff00;
		mc->ts16 |= ts8;
		mc->prev_ts8 = ts8;
	}

	return 0;
}
/*
 * Translate an error/status record into a CAN error frame and drive the
 * controller state machine (active -> warning -> passive -> bus-off).
 * @n is the device error bitmask, @status_len the record's status/len byte.
 * Returns 0 (record handled or ignored) or -ENOMEM if no skb is available.
 */
static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
				 u8 status_len)
{
	struct sk_buff *skb;
	struct can_frame *cf;
	struct timeval tv;
	enum can_state new_state;

	/* ignore this error until 1st ts received */
	if (n == PCAN_USB_ERROR_QOVR)
		if (!mc->pdev->time_ref.tick_count)
			return 0;

	new_state = mc->pdev->dev.can.state;

	switch (mc->pdev->dev.can.state) {
	case CAN_STATE_ERROR_ACTIVE:
		if (n & PCAN_USB_ERROR_BUS_LIGHT) {
			new_state = CAN_STATE_ERROR_WARNING;
			break;
		}
		/* fall through: the WARNING-state checks also apply here */

	case CAN_STATE_ERROR_WARNING:
		if (n & PCAN_USB_ERROR_BUS_HEAVY) {
			new_state = CAN_STATE_ERROR_PASSIVE;
			break;
		}
		if (n & PCAN_USB_ERROR_BUS_OFF) {
			new_state = CAN_STATE_BUS_OFF;
			break;
		}
		if (n & (PCAN_USB_ERROR_RXQOVR | PCAN_USB_ERROR_QOVR)) {
			/*
			 * trick to bypass next comparison and process other
			 * errors
			 */
			new_state = CAN_STATE_MAX;
			break;
		}
		if ((n & PCAN_USB_ERROR_BUS_LIGHT) == 0) {
			/* no error (back to active state) */
			mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE;
			return 0;
		}
		break;

	case CAN_STATE_ERROR_PASSIVE:
		if (n & PCAN_USB_ERROR_BUS_OFF) {
			new_state = CAN_STATE_BUS_OFF;
			break;
		}
		if (n & PCAN_USB_ERROR_BUS_LIGHT) {
			new_state = CAN_STATE_ERROR_WARNING;
			break;
		}
		if (n & (PCAN_USB_ERROR_RXQOVR | PCAN_USB_ERROR_QOVR)) {
			/*
			 * trick to bypass next comparison and process other
			 * errors
			 */
			new_state = CAN_STATE_MAX;
			break;
		}
		if ((n & PCAN_USB_ERROR_BUS_HEAVY) == 0) {
			/* no error (back to active state) */
			mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE;
			return 0;
		}
		break;

	default:
		/* do nothing waiting for restart */
		return 0;
	}

	/* do not post any error if current state didn't change */
	if (mc->pdev->dev.can.state == new_state)
		return 0;

	/* allocate an skb to store the error frame */
	skb = alloc_can_err_skb(mc->netdev, &cf);
	if (!skb)
		return -ENOMEM;

	switch (new_state) {
	case CAN_STATE_BUS_OFF:
		cf->can_id |= CAN_ERR_BUSOFF;
		can_bus_off(mc->netdev);
		break;

	case CAN_STATE_ERROR_PASSIVE:
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE |
			       CAN_ERR_CRTL_RX_PASSIVE;
		mc->pdev->dev.can.can_stats.error_passive++;
		break;

	case CAN_STATE_ERROR_WARNING:
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] |= CAN_ERR_CRTL_TX_WARNING |
			       CAN_ERR_CRTL_RX_WARNING;
		mc->pdev->dev.can.can_stats.error_warning++;
		break;

	default:
		/* CAN_STATE_MAX (trick to handle other errors) */
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
		mc->netdev->stats.rx_over_errors++;
		mc->netdev->stats.rx_errors++;

		/* overflow is not a state change: keep the current state */
		new_state = mc->pdev->dev.can.state;
		break;
	}

	mc->pdev->dev.can.state = new_state;

	if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
		peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
		skb->tstamp = timeval_to_ktime(tv);
	}

	netif_rx(skb);
	mc->netdev->stats.rx_packets++;
	mc->netdev->stats.rx_bytes += cf->can_dlc;

	return 0;
}
/*
 * decode non-data usb message
 *
 * Reads the function/number pair from the record header, optionally
 * decodes a timestamp, then dispatches on the function code. Advances
 * mc->ptr past the whole record. Returns 0 or a negative error
 * (-EINVAL on a truncated record).
 */
static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc,
				  u8 status_len)
{
	u8 rec_len = status_len & PCAN_USB_STATUSLEN_DLC;
	u8 f, n;
	int err;

	/* check whether function and number can be read */
	if ((mc->ptr + 2) > mc->end)
		return -EINVAL;

	f = mc->ptr[PCAN_USB_CMD_FUNC];
	n = mc->ptr[PCAN_USB_CMD_NUM];
	/* skip the record header up to the args area
	 * (presumably PCAN_USB_CMD_ARGS == 2 — matches the check above) */
	mc->ptr += PCAN_USB_CMD_ARGS;

	if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
		/* reuse the outer err: an inner declaration used to
		 * shadow it (-Wshadow) */
		err = pcan_usb_decode_ts(mc, !mc->rec_idx);
		if (err)
			return err;
	}

	switch (f) {
	case PCAN_USB_REC_ERROR:
		err = pcan_usb_decode_error(mc, n, status_len);
		if (err)
			return err;
		break;

	case PCAN_USB_REC_ANALOG:
		/* analog values (ignored) */
		rec_len = 2;
		break;

	case PCAN_USB_REC_BUSLOAD:
		/* bus load (ignored) */
		rec_len = 1;
		break;

	case PCAN_USB_REC_TS:
		/* only timestamp */
		if (pcan_usb_update_ts(mc))
			return -EINVAL;
		break;

	case PCAN_USB_REC_BUSEVT:
		/* error frame/bus event */
		if (n & PCAN_USB_ERROR_TXQFULL)
			netdev_dbg(mc->netdev, "device Tx queue full\n");
		break;

	default:
		netdev_err(mc->netdev, "unexpected function %u\n", f);
		break;
	}

	/* skip the record payload */
	if ((mc->ptr + rec_len) > mc->end)
		return -EINVAL;

	mc->ptr += rec_len;

	return 0;
}
/*
 * decode data usb message
 *
 * Builds a CAN frame skb from one data record and pushes it up the
 * stack. Returns 0, -ENOMEM if no skb could be allocated, or -EINVAL
 * on a truncated record.
 */
static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
{
	u8 rec_len = status_len & PCAN_USB_STATUSLEN_DLC;
	struct sk_buff *skb;
	struct can_frame *cf;
	struct timeval tv;

	skb = alloc_can_skb(mc->netdev, &cf);
	if (!skb)
		return -ENOMEM;

	if (status_len & PCAN_USB_STATUSLEN_EXT_ID) {
		u32 tmp32;

		if ((mc->ptr + 4) > mc->end)
			goto decode_failed;

		memcpy(&tmp32, mc->ptr, 4);
		mc->ptr += 4;

		/* the on-wire id is little-endian: convert to CPU order
		 * BEFORE shifting; shifting the raw value was wrong on
		 * big-endian hosts */
		cf->can_id = (le32_to_cpu(tmp32) >> 3) | CAN_EFF_FLAG;
	} else {
		u16 tmp16;

		if ((mc->ptr + 2) > mc->end)
			goto decode_failed;

		memcpy(&tmp16, mc->ptr, 2);
		mc->ptr += 2;

		/* same byte-order fix as the extended-id path */
		cf->can_id = le16_to_cpu(tmp16) >> 5;
	}

	cf->can_dlc = get_can_dlc(rec_len);

	/* first data packet timestamp is a word */
	if (pcan_usb_decode_ts(mc, !mc->rec_data_idx))
		goto decode_failed;

	/* read data */
	memset(cf->data, 0x0, sizeof(cf->data));
	if (status_len & PCAN_USB_STATUSLEN_RTR) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		if ((mc->ptr + rec_len) > mc->end)
			goto decode_failed;

		memcpy(cf->data, mc->ptr, rec_len);
		mc->ptr += rec_len;
	}

	/* convert timestamp into kernel time */
	peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
	skb->tstamp = timeval_to_ktime(tv);

	/* push the skb */
	netif_rx(skb);

	/* update statistics */
	mc->netdev->stats.rx_packets++;
	mc->netdev->stats.rx_bytes += cf->can_dlc;

	return 0;

decode_failed:
	dev_kfree_skb(skb);
	return -EINVAL;
}
/*
 * Walk every record of an incoming USB message. ibuf[1] holds the record
 * count; records start after the message header. Decoding stops at the
 * first record that fails. Returns 0 or the first decoder error.
 */
static int pcan_usb_decode_msg(struct peak_usb_device *dev, u8 *ibuf, u32 lbuf)
{
	struct pcan_usb_msg_context mc = {
		.rec_cnt = ibuf[1],
		.ptr = ibuf + PCAN_USB_MSG_HEADER_LEN,
		.end = ibuf + lbuf,
		.netdev = dev->netdev,
		.pdev = container_of(dev, struct pcan_usb, dev),
	};
	int err = 0;

	while (mc.rec_idx < mc.rec_cnt && !err) {
		u8 status_len = *mc.ptr++;

		if (status_len & PCAN_USB_STATUSLEN_INTERNAL) {
			/* status and error frames */
			err = pcan_usb_decode_status(&mc, status_len);
		} else {
			/* normal can frames */
			err = pcan_usb_decode_data(&mc, status_len);
			mc.rec_data_idx++;
		}

		mc.rec_idx++;
	}

	return err;
}
/*
* process any incoming buffer
*/
static int pcan_usb_decode_buf(struct peak_usb_device *dev, struct urb *urb)
{
int err = 0;
if (urb->actual_length > PCAN_USB_MSG_HEADER_LEN) {
err = pcan_usb_decode_msg(dev, urb->transfer_buffer,
urb->actual_length);
} else if (urb->actual_length > 0) {
netdev_err(dev->netdev, "usb message length error (%u)\n",
urb->actual_length);
err = -EINVAL;
}
return err;
}
/*
 * process outgoing packet
 *
 * Serializes one CAN frame into the adapter's wire format in @obuf:
 * a 2-byte header, a status/len byte, a shifted little-endian id
 * (2 or 4 bytes), then the data bytes (unless RTR). The last byte of
 * the buffer carries the low byte of the tx packet counter.
 * Always returns 0; *size is the caller-provided buffer size and is
 * not modified here.
 */
static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb,
			       u8 *obuf, size_t *size)
{
	struct net_device *netdev = dev->netdev;
	struct net_device_stats *stats = &netdev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	u8 *pc;

	/* fixed message header: 2 / 1 (meaning per device protocol) */
	obuf[0] = 2;
	obuf[1] = 1;

	pc = obuf + PCAN_USB_MSG_HEADER_LEN;

	/* status/len byte */
	*pc = cf->can_dlc;
	if (cf->can_id & CAN_RTR_FLAG)
		*pc |= PCAN_USB_STATUSLEN_RTR;

	/* can id */
	if (cf->can_id & CAN_EFF_FLAG) {
		__le32 tmp32 = cpu_to_le32((cf->can_id & CAN_ERR_MASK) << 3);

		*pc |= PCAN_USB_STATUSLEN_EXT_ID;
		/* ++pc: the id starts right after the status/len byte */
		memcpy(++pc, &tmp32, 4);
		pc += 4;
	} else {
		__le16 tmp16 = cpu_to_le16((cf->can_id & CAN_ERR_MASK) << 5);

		memcpy(++pc, &tmp16, 2);
		pc += 2;
	}

	/* can data */
	if (!(cf->can_id & CAN_RTR_FLAG)) {
		memcpy(pc, cf->data, cf->can_dlc);
		pc += cf->can_dlc;
	}

	/* low byte of the running tx counter goes into the last byte */
	obuf[(*size)-1] = (u8)(stats->tx_packets & 0xff);

	return 0;
}
/*
 * Bring the interface up: initialize the time reference and disable the
 * external VCC output. Hardware revisions above 3 additionally support
 * silent (listen-only) mode, which is set from the configured ctrlmode.
 */
static int pcan_usb_start(struct peak_usb_device *dev)
{
	struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev);
	int err = 0;

	/* number of bits used in timestamps read from adapter struct */
	peak_usb_init_time_ref(&pdev->time_ref, &pcan_usb);

	/* if revision greater than 3, can put silent mode on/off */
	if (dev->device_rev > 3)
		err = pcan_usb_set_silent(dev,
				dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY);
	if (err)
		return err;

	return pcan_usb_set_ext_vcc(dev, 0);
}
/*
 * One-time device initialization: set up the restart timer and read the
 * serial number, logging the adapter identity. Returns 0 or the error
 * from the serial-number query.
 */
static int pcan_usb_init(struct peak_usb_device *dev)
{
	struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev);
	u32 serial_number;
	int err;

	/* initialize a timer needed to wait for hardware restart */
	init_timer(&pdev->restart_timer);
	pdev->restart_timer.function = pcan_usb_restart;
	pdev->restart_timer.data = (unsigned long)dev;

	/*
	 * explicit use of dev_xxx() instead of netdev_xxx() here:
	 * information displayed are related to the device itself, not
	 * to the canx netdevice.
	 */
	err = pcan_usb_get_serial(dev, &serial_number);
	if (err) {
		dev_err(dev->netdev->dev.parent,
			"unable to read %s serial number (err %d)\n",
			pcan_usb.name, err);
		return err;
	}

	dev_info(dev->netdev->dev.parent,
		 "PEAK-System %s adapter hwrev %u serial %08X (%u channel)\n",
		 pcan_usb.name, dev->device_rev, serial_number,
		 pcan_usb.ctrl_count);

	return 0;
}
/*
* probe function for new PCAN-USB usb interface
*/
static int pcan_usb_probe(struct usb_interface *intf)
{
struct usb_host_interface *if_desc;
int i;
if_desc = intf->altsetting;
/* check interface endpoint addresses */
for (i = 0; i < if_desc->desc.bNumEndpoints; i++) {
struct usb_endpoint_descriptor *ep = &if_desc->endpoint[i].desc;
switch (ep->bEndpointAddress) {
case PCAN_USB_EP_CMDOUT:
case PCAN_USB_EP_CMDIN:
case PCAN_USB_EP_MSGOUT:
case PCAN_USB_EP_MSGIN:
break;
default:
return -ENODEV;
}
}
return 0;
}
/*
 * describe the PCAN-USB adapter
 *
 * Static adapter descriptor consumed by the peak_usb core: identity,
 * clock/bittiming limits, timestamp parameters, endpoints, buffer sizes
 * and the per-device callbacks defined above.
 */
struct peak_usb_adapter pcan_usb = {
	.name = "PCAN-USB",
	.device_id = PCAN_USB_PRODUCT_ID,
	.ctrl_count = 1,
	.clock = {
		/* CAN clock is half the crystal frequency */
		.freq = PCAN_USB_CRYSTAL_HZ / 2 ,
	},
	.bittiming_const = {
		.name = "pcan_usb",
		.tseg1_min = 1,
		.tseg1_max = 16,
		.tseg2_min = 1,
		.tseg2_max = 8,
		.sjw_max = 4,
		.brp_min = 1,
		.brp_max = 64,
		.brp_inc = 1,
	},

	/* size of device private data */
	.sizeof_dev_private = sizeof(struct pcan_usb),

	/* timestamps usage */
	.ts_used_bits = 16,
	.ts_period = 24575, /* calibration period in ts. */
	.us_per_ts_scale = PCAN_USB_TS_US_PER_TICK, /* us=(ts*scale) */
	.us_per_ts_shift = PCAN_USB_TS_DIV_SHIFTER, /* >> shift */

	/* give here messages in/out endpoints */
	.ep_msg_in = PCAN_USB_EP_MSGIN,
	.ep_msg_out = {PCAN_USB_EP_MSGOUT},

	/* size of rx/tx usb buffers */
	.rx_buffer_size = PCAN_USB_RX_BUFFER_SIZE,
	.tx_buffer_size = PCAN_USB_TX_BUFFER_SIZE,

	/* device callbacks */
	.intf_probe = pcan_usb_probe,
	.dev_init = pcan_usb_init,
	.dev_set_bus = pcan_usb_write_mode,
	.dev_set_bittiming = pcan_usb_set_bittiming,
	.dev_get_device_id = pcan_usb_get_device_id,
	.dev_decode_buf = pcan_usb_decode_buf,
	.dev_encode_msg = pcan_usb_encode_msg,
	.dev_start = pcan_usb_start,
	.dev_restart_async = pcan_usb_restart_async,
};
| gpl-2.0 |
rebelde-/kyubi | drivers/net/ni5010.c | 3934 | 22862 | /* ni5010.c: A network driver for the MiCom-Interlan NI5010 ethercard.
*
* Copyright 1996,1997,2006 Jan-Pascal van Best and Andreas Mohr.
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* The authors may be reached as:
* janpascal@vanbest.org andi@lisas.de
*
* Sources:
* Donald Becker's "skeleton.c"
* Crynwr ni5010 packet driver
*
* Changes:
* v0.0: First test version
* v0.1: First working version
* v0.2:
* v0.3->v0.90: Now demand setting io and irq when loading as module
* 970430 v0.91: modified for Linux 2.1.14
* v0.92: Implemented Andreas' (better) NI5010 probe
* 970503 v0.93: Fixed auto-irq failure on warm reboot (JB)
* 970623 v1.00: First kernel version (AM)
* 970814 v1.01: Added detection of onboard receive buffer size (AM)
* 060611 v1.02: slight cleanup: email addresses, driver modernization.
* Bugs:
* - not SMP-safe (no locking of I/O accesses)
* - Note that you have to patch ifconfig for the new /proc/net/dev
* format. It gives incorrect stats otherwise.
*
* To do:
* Fix all bugs :-)
* Move some stuff to chipset_init()
* Handle xmt errors other than collisions
* Complete merge with Andreas' driver
* Implement ring buffers (Is this useful? You can't squeeze
 *			too many packets in a 2k buffer!)
* Implement DMA (Again, is this useful? Some docs say DMA is
* slower than programmed I/O)
*
* Compile with:
* gcc -O2 -fomit-frame-pointer -m486 -D__KERNEL__ \
* -DMODULE -c ni5010.c
*
* Insert with e.g.:
* insmod ni5010.ko io=0x300 irq=5
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include "ni5010.h"
static const char boardname[] = "NI5010";
static char version[] __initdata =
"ni5010.c: v1.02 20060611 Jan-Pascal van Best and Andreas Mohr\n";
/* bufsize_rcv == 0 means autoprobing */
static unsigned int bufsize_rcv;
#define JUMPERED_INTERRUPTS /* IRQ line jumpered on board */
#undef JUMPERED_DMA /* No DMA used */
#undef FULL_IODETECT /* Only detect in portlist */
#ifndef FULL_IODETECT
/* A zero-terminated list of I/O addresses to be probed. */
static unsigned int ports[] __initdata =
{ 0x300, 0x320, 0x340, 0x360, 0x380, 0x3a0, 0 };
#endif
/* Use 0 for production, 1 for verification, >2 for debug */
#ifndef NI5010_DEBUG
#define NI5010_DEBUG 0
#endif
/* Information that needs to be kept for each board. */
struct ni5010_local {
int o_pkt_size;
spinlock_t lock;
};
/* Index to functions, as function prototypes. */
static int ni5010_probe1(struct net_device *dev, int ioaddr);
static int ni5010_open(struct net_device *dev);
static int ni5010_send_packet(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t ni5010_interrupt(int irq, void *dev_id);
static void ni5010_rx(struct net_device *dev);
static void ni5010_timeout(struct net_device *dev);
static int ni5010_close(struct net_device *dev);
static void ni5010_set_multicast_list(struct net_device *dev);
static void reset_receiver(struct net_device *dev);
static int process_xmt_interrupt(struct net_device *dev);
#define tx_done(dev) 1
static void hardware_send_packet(struct net_device *dev, char *buf, int length, int pad);
static void chipset_init(struct net_device *dev, int startp);
static void dump_packet(void *buf, int len);
static void ni5010_show_registers(struct net_device *dev);
static int io;
static int irq;
/*
 * Locate and register an NI5010 board.
 *
 * unit >= 0 names a boot-configured "ethX" device (io/irq taken from the
 * boot setup); unit < 0 is the module-load path using the io/irq module
 * parameters. io > 0x1ff probes that single address, any other non-zero
 * io disables probing (-ENXIO), io == 0 scans the port list (or the full
 * 0x200-0x3ff range with FULL_IODETECT).
 *
 * Returns the registered net_device or an ERR_PTR() on failure; the
 * allocated device and reserved I/O region are released on every error
 * path.
 */
struct net_device * __init ni5010_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct ni5010_local));
	int *port;
	int err = 0;

	if (!dev)
		return ERR_PTR(-ENOMEM);

	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
		io = dev->base_addr;
		irq = dev->irq;
	}

	PRINTK2((KERN_DEBUG "%s: Entering ni5010_probe\n", dev->name));

	if (io > 0x1ff) {	/* Check a single specified location. */
		err = ni5010_probe1(dev, io);
	} else if (io != 0) {	/* Don't probe at all. */
		err = -ENXIO;
	} else {
#ifdef FULL_IODETECT
		/* ni5010_probe1() returns 0 on success, stopping the scan */
		for (io=0x200; io<0x400 && ni5010_probe1(dev, io) ; io+=0x20)
			;
		if (io == 0x400)
			err = -ENODEV;
#else
		for (port = ports; *port && ni5010_probe1(dev, *port); port++)
			;
		if (!*port)
			err = -ENODEV;
#endif	/* FULL_IODETECT */
	}
	if (err)
		goto out;

	err = register_netdev(dev);
	if (err)
		goto out1;
	return dev;
out1:
	/* probe succeeded, so the region is held and must be released */
	release_region(dev->base_addr, NI5010_IO_EXTENT);
out:
	free_netdev(dev);
	return ERR_PTR(err);
}
/*
 * Read the next byte of the station-address PROM. The dummy read of the
 * receive buffer presumably steps the PROM read sequence — the probe
 * loops rely on repeated calls yielding successive bytes.
 */
static inline int rd_port(int ioaddr)
{
	(void)inb(IE_RBUF);

	return inb(IE_SAPROM);
}
/*
 * Make the board raise an interrupt, used during IRQ autoprobe: reset
 * the board, clear and re-enable transmit interrupts, then start a
 * loopback transmission so completion fires the IRQ line.
 */
static void __init trigger_irq(int ioaddr)
{
	outb(0x00, EDLC_RESET);	/* Clear EDLC hold RESET state */
	outb(0x00, IE_RESET);	/* Board reset */
	outb(0x00, EDLC_XMASK);	/* Disable all Xmt interrupts */
	outb(0x00, EDLC_RMASK);	/* Disable all Rcv interrupt */
	outb(0xff, EDLC_XCLR);	/* Clear all pending Xmt interrupts */
	outb(0xff, EDLC_RCLR);	/* Clear all pending Rcv interrupts */
	/*
	 * Transmit packet mode: Ignore parity, Power xcvr,
	 * Enable loopback
	 */
	outb(XMD_IG_PAR | XMD_T_MODE | XMD_LBC, EDLC_XMODE);
	outb(RMD_BROADCAST, EDLC_RMODE); /* Receive normal&broadcast */
	outb(XM_ALL, EDLC_XMASK);	/* Enable all Xmt interrupts */
	udelay(50);			/* FIXME: Necessary? */
	outb(MM_EN_XMT|MM_MUX, IE_MMODE); /* Start transmission */
}
/* net_device_ops: wire the driver entry points into the netdev core. */
static const struct net_device_ops ni5010_netdev_ops = {
	.ndo_open		= ni5010_open,
	.ndo_stop		= ni5010_close,
	.ndo_start_xmit		= ni5010_send_packet,
	.ndo_set_multicast_list = ni5010_set_multicast_list,
	.ndo_tx_timeout		= ni5010_timeout,
	/* generic ethernet helpers for address handling and MTU */
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
/*
 * This is the real probe routine. Linux has a history of friendly device
 * probes on the ISA bus. A good device probes avoids doing writes, and
 * verifies that the correct device exists and functions.
 *
 * On success the MAC address, IRQ (optionally autoprobed), netdev ops and
 * receive buffer size are all set up and 0 is returned; otherwise the I/O
 * region is released and a negative error returned.
 */
static int __init ni5010_probe1(struct net_device *dev, int ioaddr)
{
	static unsigned version_printed;
	struct ni5010_local *lp;
	int i;
	unsigned int data = 0;
	int boguscount = 40;
	int err = -ENODEV;

	dev->base_addr = ioaddr;
	dev->irq = irq;

	if (!request_region(ioaddr, NI5010_IO_EXTENT, boardname))
		return -EBUSY;

	/*
	 * This is no "official" probe method, I've rather tested which
	 * probe works best with my seven NI5010 cards
	 * (they have very different serial numbers)
	 * Suggestions or failure reports are very, very welcome !
	 * But I think it is a relatively good probe method
	 * since it doesn't use any "outb"
	 * It should be nearly 100% reliable !
	 * well-known WARNING: this probe method (like many others)
	 * will hang the system if a NE2000 card region is probed !
	 *
	 * - Andreas
	 */

	PRINTK2((KERN_DEBUG "%s: entering ni5010_probe1(%#3x)\n",
		dev->name, ioaddr));

	if (inb(ioaddr+0) == 0xff)
		goto out;

	/* the AND of several successive PROM reads must eventually differ
	 * from 0xff on a real board; give up after 40 tries */
	while ( (rd_port(ioaddr) & rd_port(ioaddr) & rd_port(ioaddr) &
		rd_port(ioaddr) & rd_port(ioaddr) & rd_port(ioaddr)) != 0xff)
	{
		if (boguscount-- == 0)
			goto out;
	}

	PRINTK2((KERN_DEBUG "%s: I/O #1 passed!\n", dev->name));

	/* sync up with the start of the PROM contents */
	for (i=0; i<32; i++)
		if ( (data = rd_port(ioaddr)) != 0xff) break;
	if (data==0xff)
		goto out;

	PRINTK2((KERN_DEBUG "%s: I/O #2 passed!\n", dev->name));

	/* first three PROM bytes must be the Interlan OUI */
	if ((data != SA_ADDR0) || (rd_port(ioaddr) != SA_ADDR1) ||
	    (rd_port(ioaddr) != SA_ADDR2))
		goto out;

	for (i=0; i<4; i++)
		rd_port(ioaddr);

	/* two magic signature bytes follow the address in the PROM */
	if ( (rd_port(ioaddr) != NI5010_MAGICVAL1) ||
	     (rd_port(ioaddr) != NI5010_MAGICVAL2) )
		goto out;

	PRINTK2((KERN_DEBUG "%s: I/O #3 passed!\n", dev->name));

	if (NI5010_DEBUG && version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	printk("NI5010 ethercard probe at 0x%x: ", ioaddr);

	dev->base_addr = ioaddr;

	/* read the 6-byte station address via the GP pointer */
	for (i=0; i<6; i++) {
		outw(i, IE_GP);
		dev->dev_addr[i] = inb(IE_SAPROM);
	}
	printk("%pM ", dev->dev_addr);

	PRINTK2((KERN_DEBUG "%s: I/O #4 passed!\n", dev->name));

#ifdef JUMPERED_INTERRUPTS
	if (dev->irq == 0xff)
		;	/* 0xff means "leave unset" */
	else if (dev->irq < 2) {
		/* autoprobe the IRQ by provoking one with trigger_irq() */
		unsigned long irq_mask;

		PRINTK2((KERN_DEBUG "%s: I/O #5 passed!\n", dev->name));

		irq_mask = probe_irq_on();
		trigger_irq(ioaddr);
		mdelay(20);
		dev->irq = probe_irq_off(irq_mask);

		PRINTK2((KERN_DEBUG "%s: I/O #6 passed!\n", dev->name));

		if (dev->irq == 0) {
			err = -EAGAIN;
			printk(KERN_WARNING "%s: no IRQ found!\n", dev->name);
			goto out;
		}
		PRINTK2((KERN_DEBUG "%s: I/O #7 passed!\n", dev->name));
	} else if (dev->irq == 2) {
		/* IRQ 2 is cascaded; use 9 instead (classic ISA quirk) */
		dev->irq = 9;
	}
#endif	/* JUMPERED_INTERRUPTS */
	PRINTK2((KERN_DEBUG "%s: I/O #9 passed!\n", dev->name));

	/* DMA is not supported (yet?), so no use detecting it */
	lp = netdev_priv(dev);

	spin_lock_init(&lp->lock);

	PRINTK2((KERN_DEBUG "%s: I/O #10 passed!\n", dev->name));

	/* get the size of the onboard receive buffer
	 * higher addresses than bufsize are wrapped into real buffer
	 * i.e. data for offs. 0x801 is written to 0x1 with a 2K onboard buffer
	 */
	if (!bufsize_rcv) {
		outb(1, IE_MMODE);	/* Put Rcv buffer on system bus */
		outw(0, IE_GP);		/* Point GP at start of packet */
		outb(0, IE_RBUF);	/* set buffer byte 0 to 0 */
		for (i = 1; i < 0xff; i++) {
			outw(i << 8, IE_GP); /* Point GP at packet size to be tested */
			outb(i, IE_RBUF);
			outw(0x0, IE_GP); /* Point GP at start of packet */
			data = inb(IE_RBUF);
			/* the write wrapped back onto offset 0: found size */
			if (data == i) break;
		}
		bufsize_rcv = i << 8;
		outw(0, IE_GP);		/* Point GP at start of packet */
		outb(0, IE_RBUF);	/* set buffer byte 0 to 0 again */
	}
	printk("-> bufsize rcv/xmt=%d/%d\n", bufsize_rcv, NI5010_BUFSIZE);

	dev->netdev_ops		= &ni5010_netdev_ops;
	dev->watchdog_timeo	= HZ/20;

	dev->flags &= ~IFF_MULTICAST;	/* Multicast doesn't work */

	/* Shut up the ni5010 */
	outb(0, EDLC_RMASK);	/* Mask all receive interrupts */
	outb(0, EDLC_XMASK);	/* Mask all xmit interrupts */
	outb(0xff, EDLC_RCLR);	/* Kill all pending rcv interrupts */
	outb(0xff, EDLC_XCLR); 	/* Kill all pending xmt interrupts */

	printk(KERN_INFO "%s: NI5010 found at 0x%x, using IRQ %d", dev->name, ioaddr, dev->irq);
	if (dev->dma)
		printk(" & DMA %d", dev->dma);
	printk(".\n");
	return 0;
out:
	release_region(dev->base_addr, NI5010_IO_EXTENT);
	return err;
}
/*
 * Open/initialize the board. This is called (in the current kernel)
 * sometime after booting when the 'ifconfig' program is run.
 *
 * This routine should set everything up anew at each open, even
 * registers that "should" only need to be set once at boot, so that
 * there is a non-reboot way to recover if something goes wrong.
 *
 * Acquires the IRQ (and DMA, if JUMPERED_DMA), resets and reprograms
 * the chip, sets the station address and arms the receiver.
 * Returns 0 or -EAGAIN if the IRQ/DMA could not be obtained.
 */
static int ni5010_open(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	int i;

	PRINTK2((KERN_DEBUG "%s: entering ni5010_open()\n", dev->name));

	if (request_irq(dev->irq, ni5010_interrupt, 0, boardname, dev)) {
		printk(KERN_WARNING "%s: Cannot get irq %#2x\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	PRINTK3((KERN_DEBUG "%s: passed open() #1\n", dev->name));
	/*
	 * Always allocate the DMA channel after the IRQ,
	 * and clean up on failure.
	 */
#ifdef JUMPERED_DMA
	if (request_dma(dev->dma, cardname)) {
		printk(KERN_WARNING "%s: Cannot get dma %#2x\n", dev->name, dev->dma);
		free_irq(dev->irq, NULL);
		return -EAGAIN;
	}
#endif	/* JUMPERED_DMA */

	PRINTK3((KERN_DEBUG "%s: passed open() #2\n", dev->name));

	/* Reset the hardware here. Don't forget to set the station address. */

	outb(RS_RESET, EDLC_RESET);	/* Hold up EDLC_RESET while configing board */
	outb(0, IE_RESET);		/* Hardware reset of ni5010 board */
	outb(XMD_LBC, EDLC_XMODE);	/* Only loopback xmits */

	PRINTK3((KERN_DEBUG "%s: passed open() #3\n", dev->name));

	/* Set the station address */
	for(i = 0;i < 6; i++) {
		outb(dev->dev_addr[i], EDLC_ADDR + i);
	}

	PRINTK3((KERN_DEBUG "%s: Initialising ni5010\n", dev->name));

	outb(0, EDLC_XMASK);	/* No xmit interrupts for now */
	outb(XMD_IG_PAR | XMD_T_MODE | XMD_LBC, EDLC_XMODE);
				/* Normal packet xmit mode */
	outb(0xff, EDLC_XCLR);	/* Clear all pending xmit interrupts */
	outb(RMD_BROADCAST, EDLC_RMODE);
				/* Receive broadcast and normal packets */
	reset_receiver(dev);	/* Ready ni5010 for receiving packets */

	outb(0, EDLC_RESET);	/* Un-reset the ni5010 */

	netif_start_queue(dev);

	if (NI5010_DEBUG) ni5010_show_registers(dev);

	PRINTK((KERN_DEBUG "%s: open successful\n", dev->name));
	return 0;
}
/*
 * Re-arm the receiver: rewind the buffer pointer, clear any stale
 * receive interrupts, route the EDLC to the receive buffer and
 * re-enable receive interrupts. The outb() order is significant.
 */
static void reset_receiver(struct net_device *dev)
{
	int ioaddr = dev->base_addr;

	PRINTK3((KERN_DEBUG "%s: resetting receiver\n", dev->name));
	outw(0, IE_GP);		/* Receive packet at start of buffer */
	outb(0xff, EDLC_RCLR);	/* Clear all pending rcv interrupts */
	outb(0, IE_MMODE);	/* Put EDLC to rcv buffer */
	outb(MM_EN_RCV, IE_MMODE); /* Enable rcv */
	outb(0xff, EDLC_RMASK);	/* Enable all rcv interrupts */
}
/*
 * Transmit watchdog: log the probable cause, kick the chipset and
 * re-enable the transmit queue.
 */
static void ni5010_timeout(struct net_device *dev)
{
	const char *cause = tx_done(dev) ? "IRQ conflict"
					 : "network cable problem";

	printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, cause);

	/* Try to restart the adaptor. */
	/* FIXME: Give it a real kick here */
	chipset_init(dev, 1);

	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}
/*
 * ndo_start_xmit: pad short frames to the ethernet minimum, stop the
 * queue (one packet in flight at a time) and hand the data to the board.
 */
static int ni5010_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	int length = skb->len;

	if (length < ETH_ZLEN)
		length = ETH_ZLEN;

	PRINTK2((KERN_DEBUG "%s: entering ni5010_send_packet\n", dev->name));

	/*
	 * Block sending
	 */
	netif_stop_queue(dev);
	hardware_send_packet(dev, (unsigned char *)skb->data, skb->len,
			     length - skb->len);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 *
 * NOTE(review): the status bits are tested for == 0, i.e. they appear to
 * be active-low in IE_ISTAT — confirm against the NI5010 datasheet.
 */
static irqreturn_t ni5010_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ni5010_local *lp;
	int ioaddr, status;
	int xmit_was_error = 0;

	PRINTK2((KERN_DEBUG "%s: entering ni5010_interrupt\n", dev->name));

	ioaddr = dev->base_addr;
	lp = netdev_priv(dev);

	spin_lock(&lp->lock);
	status = inb(IE_ISTAT);
	PRINTK3((KERN_DEBUG "%s: IE_ISTAT = %#02x\n", dev->name, status));

	if ((status & IS_R_INT) == 0) ni5010_rx(dev);

	if ((status & IS_X_INT) == 0) {
		xmit_was_error = process_xmt_interrupt(dev);
	}

	if ((status & IS_DMA_INT) == 0) {
		PRINTK((KERN_DEBUG "%s: DMA complete (?)\n", dev->name));
		outb(0, IE_DMA_RST); /* Reset DMA int */
	}

	/* a collision retransmit is in progress; don't disturb the buffer */
	if (!xmit_was_error)
		reset_receiver(dev);
	spin_unlock(&lp->lock);
	return IRQ_HANDLED;
}
/*
 * Debug helper: hex-dump @len bytes of @buf to the kernel log,
 * 16 bytes per line, grouped in 16-bit words.
 */
static void dump_packet(void *buf, int len)
{
	const unsigned char *bytes = buf;
	int i;

	printk(KERN_DEBUG "Packet length = %#4x\n", len);
	for (i = 0; i < len; i++) {
		if (i % 16 == 0)
			printk(KERN_DEBUG "%#4.4x", i);
		if (i % 2 == 0)
			printk(" ");
		printk("%2.2x", bytes[i]);
		if (i % 16 == 15)
			printk("\n");
	}
	printk("\n");
}
/* We have a good packet, get it out of the buffer.
 *
 * Validates the receive status and packet length, copies the frame from
 * the onboard buffer into a fresh skb and pushes it up the stack.
 * Bad packets only bump the error counters.
 */
static void ni5010_rx(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	unsigned char rcv_stat;
	struct sk_buff *skb;
	int i_pkt_size;

	PRINTK2((KERN_DEBUG "%s: entering ni5010_rx()\n", dev->name));

	rcv_stat = inb(EDLC_RSTAT);
	PRINTK3((KERN_DEBUG "%s: EDLC_RSTAT = %#2x\n", dev->name, rcv_stat));

	if ( (rcv_stat & RS_VALID_BITS) != RS_PKT_OK) {
		PRINTK((KERN_INFO "%s: receive error.\n", dev->name));
		dev->stats.rx_errors++;
		if (rcv_stat & RS_RUNT) dev->stats.rx_length_errors++;
		if (rcv_stat & RS_ALIGN) dev->stats.rx_frame_errors++;
		if (rcv_stat & RS_CRC_ERR) dev->stats.rx_crc_errors++;
		if (rcv_stat & RS_OFLW) dev->stats.rx_fifo_errors++;
		outb(0xff, EDLC_RCLR); /* Clear the interrupt */
		return;
	}

	outb(0xff, EDLC_RCLR);	/* Clear the interrupt */

	i_pkt_size = inw(IE_RCNT);
	if (i_pkt_size > ETH_FRAME_LEN || i_pkt_size < 10 ) {
		PRINTK((KERN_DEBUG "%s: Packet size error, packet size = %#4.4x\n",
			dev->name, i_pkt_size));
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
		return;
	}

	/* Malloc up new buffer. */
	skb = dev_alloc_skb(i_pkt_size + 3);
	if (skb == NULL) {
		printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
		dev->stats.rx_dropped++;
		return;
	}

	/* 2 bytes so the IP header lands on a 4-byte boundary */
	skb_reserve(skb, 2);

	/* Read packet into buffer */
	outb(MM_MUX, IE_MMODE); /* Rcv buffer to system bus */
	outw(0, IE_GP);	/* Seek to beginning of packet */
	insb(IE_RBUF, skb_put(skb, i_pkt_size), i_pkt_size);

	if (NI5010_DEBUG >= 4)
		dump_packet(skb->data, skb->len);

	skb->protocol = eth_type_trans(skb,dev);
	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += i_pkt_size;

	PRINTK2((KERN_DEBUG "%s: Received packet, size=%#4.4x\n",
		dev->name, i_pkt_size));
}
/*
 * Handle a transmit-complete interrupt. On a collision the packet is
 * retransmitted from the onboard buffer and 1 is returned (so the
 * caller skips reset_receiver()); otherwise the tx stats are updated,
 * the queue is woken and 0 is returned.
 */
static int process_xmt_interrupt(struct net_device *dev)
{
	struct ni5010_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	int xmit_stat;

	PRINTK2((KERN_DEBUG "%s: entering process_xmt_interrupt\n", dev->name));

	xmit_stat = inb(EDLC_XSTAT);
	PRINTK3((KERN_DEBUG "%s: EDLC_XSTAT = %2.2x\n", dev->name, xmit_stat));

	outb(0, EDLC_XMASK);	/* Disable xmit IRQ's */
	outb(0xff, EDLC_XCLR);	/* Clear all pending xmit IRQ's */

	if (xmit_stat & XS_COLL){
		PRINTK((KERN_DEBUG "%s: collision detected, retransmitting\n",
			dev->name));
		/* the packet still sits at the end of the onboard buffer */
		outw(NI5010_BUFSIZE - lp->o_pkt_size, IE_GP);
		/* outb(0, IE_MMODE); */ /* xmt buf on sysbus FIXME: needed ? */
		outb(MM_EN_XMT | MM_MUX, IE_MMODE);
		outb(XM_ALL, EDLC_XMASK); /* Enable xmt IRQ's */
		dev->stats.collisions++;
		return 1;
	}

	/* FIXME: handle other xmt error conditions */

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += lp->o_pkt_size;
	netif_wake_queue(dev);

	PRINTK2((KERN_DEBUG "%s: sent packet, size=%#4.4x\n",
		dev->name, lp->o_pkt_size));

	return 0;
}
/* The inverse routine to ni5010_open(): release the IRQ, put the card
 * in held-RESET state and stop the transmit queue. Always returns 0. */
static int ni5010_close(struct net_device *dev)
{
	int ioaddr = dev->base_addr;

	PRINTK2((KERN_DEBUG "%s: entering ni5010_close\n", dev->name));
#ifdef JUMPERED_INTERRUPTS
	free_irq(dev->irq, NULL);
#endif
	/* Put card in held-RESET state */
	outb(0, IE_MMODE);
	outb(RS_RESET, EDLC_RESET);

	netif_stop_queue(dev);

	PRINTK((KERN_DEBUG "%s: %s closed down\n", dev->name, boardname));
	return 0;
}
/*
 * Configure the receive filter. The chip has no multicast filter, so any
 * request for promiscuous/all-multi mode or a non-empty multicast list
 * turns on promiscuous reception; otherwise it falls back to
 * normal + broadcast mode.
 */
static void ni5010_set_multicast_list(struct net_device *dev)
{
	short ioaddr = dev->base_addr;
	int want_promisc;

	PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name));

	want_promisc = (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
		       !netdev_mc_empty(dev);

	if (want_promisc) {
		outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */
		PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name));
	} else {
		PRINTK((KERN_DEBUG "%s: Entering broadcast mode\n", dev->name));
		outb(RMD_BROADCAST, EDLC_RMODE); /* Disable promiscuous mode, use normal mode */
	}
}
/*
 * Copy @length bytes of @buf (plus @pad zero bytes) to the end of the
 * onboard transmit buffer and start transmission. Receive interrupts
 * are masked for the duration; the board raises a transmit interrupt
 * on completion or failure. Oversized frames and transmit-while-busy
 * are rejected with a log message.
 */
static void hardware_send_packet(struct net_device *dev, char *buf, int length, int pad)
{
	struct ni5010_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	unsigned long flags;
	unsigned int buf_offs;

	PRINTK2((KERN_DEBUG "%s: entering hardware_send_packet\n", dev->name));

	if (length > ETH_FRAME_LEN) {
		PRINTK((KERN_WARNING "%s: packet too large, not possible\n",
			dev->name));
		return;
	}

	if (NI5010_DEBUG) ni5010_show_registers(dev);

	if (inb(IE_ISTAT) & IS_EN_XMT) {
		PRINTK((KERN_WARNING "%s: sending packet while already transmitting, not possible\n",
			dev->name));
		return;
	}

	if (NI5010_DEBUG > 3) dump_packet(buf, length);

	/* packet goes at the END of the buffer so it finishes at the wrap */
	buf_offs = NI5010_BUFSIZE - length - pad;

	spin_lock_irqsave(&lp->lock, flags);
	lp->o_pkt_size = length + pad;

	outb(0, EDLC_RMASK);	/* Mask all receive interrupts */
	outb(0, IE_MMODE);	/* Put Xmit buffer on system bus */
	outb(0xff, EDLC_RCLR);	/* Clear out pending rcv interrupts */

	outw(buf_offs, IE_GP); /* Point GP at start of packet */
	outsb(IE_XBUF, buf, length); /* Put data in buffer */
	while(pad--)
		outb(0, IE_XBUF);

	outw(buf_offs, IE_GP); /* Rewrite where packet starts */

	/* should work without that outb() (Crynwr used it) */
	/*outb(MM_MUX, IE_MMODE);*/ /* Xmt buffer to EDLC bus */
	outb(MM_EN_XMT | MM_MUX, IE_MMODE); /* Begin transmission */

	outb(XM_ALL, EDLC_XMASK); /* Cause interrupt after completion or fail */

	spin_unlock_irqrestore(&lp->lock, flags);

	netif_wake_queue(dev);

	if (NI5010_DEBUG) ni5010_show_registers(dev);
}
/*
 * Placeholder for one-time chip initialization; currently does nothing
 * except emit a debug trace.  'startp' is unused.
 */
static void chipset_init(struct net_device *dev, int startp)
{
	/* FIXME: Move some stuff here */
	PRINTK3((KERN_DEBUG "%s: doing NOTHING in chipset_init\n", dev->name));
}
/*
 * Debug helper: dump the EDLC status/mask/mode registers and the
 * interface status register at debug level 3 (PRINTK3).  'ioaddr' is
 * referenced implicitly by the EDLC_ and IE_ register macros.
 */
static void ni5010_show_registers(struct net_device *dev)
{
	int ioaddr = dev->base_addr;

	PRINTK3((KERN_DEBUG "%s: XSTAT %#2.2x\n", dev->name, inb(EDLC_XSTAT)));
	PRINTK3((KERN_DEBUG "%s: XMASK %#2.2x\n", dev->name, inb(EDLC_XMASK)));
	PRINTK3((KERN_DEBUG "%s: RSTAT %#2.2x\n", dev->name, inb(EDLC_RSTAT)));
	PRINTK3((KERN_DEBUG "%s: RMASK %#2.2x\n", dev->name, inb(EDLC_RMASK)));
	PRINTK3((KERN_DEBUG "%s: RMODE %#2.2x\n", dev->name, inb(EDLC_RMODE)));
	PRINTK3((KERN_DEBUG "%s: XMODE %#2.2x\n", dev->name, inb(EDLC_XMODE)));
	PRINTK3((KERN_DEBUG "%s: ISTAT %#2.2x\n", dev->name, inb(IE_ISTAT)));
}
#ifdef MODULE
static struct net_device *dev_ni5010;
module_param(io, int, 0);
module_param(irq, int, 0);
MODULE_PARM_DESC(io, "ni5010 I/O base address");
MODULE_PARM_DESC(irq, "ni5010 IRQ number");
/*
 * Module entry point: probe for a single NI5010 board.
 * The 'io' and 'irq' module parameters seed the probe; io <= 0 falls
 * back to autoprobing (with a warning, since ISA autoprobe can poke
 * unrelated hardware).  Returns 0 on success or the PTR_ERR from the
 * probe on failure.
 */
static int __init ni5010_init_module(void)
{
	PRINTK2((KERN_DEBUG "%s: entering init_module\n", boardname));
	/*
	if(io <= 0 || irq == 0){
	   	printk(KERN_WARNING "%s: Autoprobing not allowed for modules.\n", boardname);
		printk(KERN_WARNING "%s: Set symbols 'io' and 'irq'\n", boardname);
	   	return -EINVAL;
	}
	*/
	if (io <= 0){
		printk(KERN_WARNING "%s: Autoprobing for modules is hazardous, trying anyway..\n", boardname);
	}
	PRINTK2((KERN_DEBUG "%s: init_module irq=%#2x, io=%#3x\n", boardname, irq, io));
	/* unit -1: presumably "use module parameters / autoprobe" -- see ni5010_probe */
	dev_ni5010 = ni5010_probe(-1);
	if (IS_ERR(dev_ni5010))
		return PTR_ERR(dev_ni5010);
	return 0;
}
/*
 * Module exit: tear down in the reverse order of setup -- detach the
 * device from the network stack first, then release the I/O region,
 * and finally free the net_device structure.
 */
static void __exit ni5010_cleanup_module(void)
{
	PRINTK2((KERN_DEBUG "%s: entering cleanup_module\n", boardname));
	unregister_netdev(dev_ni5010);
	release_region(dev_ni5010->base_addr, NI5010_IO_EXTENT);
	free_netdev(dev_ni5010);
}
module_init(ni5010_init_module);
module_exit(ni5010_cleanup_module);
#endif /* MODULE */
MODULE_LICENSE("GPL");
| gpl-2.0 |
CyanogenMod/android_kernel_samsung_lt03lte | drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c | 4190 | 53897 | /******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
#include <linux/export.h>
#include "dm_common.h"
#include "phy_common.h"
#include "../pci.h"
#include "../base.h"
struct dig_t dm_digtable;
static struct ps_t dm_pstable;
#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1)
#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1)
#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1)
#define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1)
#define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1)
#define RTLPRIV (struct rtl_priv *)
#define GET_UNDECORATED_AVERAGE_RSSI(_priv) \
((RTLPRIV(_priv))->mac80211.opmode == \
NL80211_IFTYPE_ADHOC) ? \
((RTLPRIV(_priv))->dm.entry_min_undecoratedsmoothed_pwdb) : \
((RTLPRIV(_priv))->dm.undecorated_smoothed_pwdb)
/*
 * OFDM TX swing table, indexed by the power-tracking OFDM index and
 * written into the ROFDM0_X{A,B}TXIQIMBALANCE registers by
 * rtl92c_dm_txpower_tracking_callback_thermalmeter().
 */
static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
	0x7f8001fe,
	0x788001e2,
	0x71c001c7,
	0x6b8001ae,
	0x65400195,
	0x5fc0017f,
	0x5a400169,
	0x55400155,
	0x50800142,
	0x4c000130,
	0x47c0011f,
	0x43c0010f,
	0x40000100,
	0x3c8000f2,
	0x390000e4,
	0x35c000d7,
	0x32c000cb,
	0x300000c0,
	0x2d4000b5,
	0x2ac000ab,
	0x288000a2,
	0x26000098,
	0x24000090,
	0x22000088,
	0x20000080,
	0x1e400079,
	0x1c800072,
	0x1b00006c,
	0x19800066,
	0x18000060,
	0x16c0005b,
	0x15800056,
	0x14400051,
	0x1300004c,
	0x12000048,
	0x11000044,
	0x10000040,
};

/*
 * CCK TX swing table for channels 1-13.  Each row is the 8 filter
 * coefficient bytes written to registers 0xa22..0xa29 for one
 * power-tracking CCK index.
 */
static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
	{0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
	{0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
	{0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
	{0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
	{0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
	{0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
	{0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
	{0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
	{0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
	{0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
	{0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
	{0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
	{0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
	{0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
	{0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
	{0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
	{0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
	{0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
	{0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
	{0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
	{0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
	{0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
	{0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},
	{0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},
	{0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},
	{0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},
	{0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},
	{0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},
	{0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},
	{0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},
	{0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},
	{0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},
	{0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}
};

/*
 * CCK TX swing table for channel 14 (only the first four coefficients
 * are used; the tail is zeroed for the narrower channel-14 mask).
 */
static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
	{0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
	{0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
	{0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
	{0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},
	{0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
	{0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},
	{0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
	{0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
	{0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
	{0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
	{0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
	{0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
	{0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
	{0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
	{0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
	{0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
	{0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
	{0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
	{0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
	{0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
	{0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
	{0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
	{0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},
	{0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
	{0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
	{0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},
	{0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
	{0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
	{0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
	{0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
	{0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
	{0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
	{0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
};
static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
{
dm_digtable.dig_enable_flag = true;
dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
dm_digtable.cur_igvalue = 0x20;
dm_digtable.pre_igvalue = 0x0;
dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
dm_digtable.presta_connectstate = DIG_STA_DISCONNECT;
dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
dm_digtable.rx_gain_range_max = DM_DIG_MAX;
dm_digtable.rx_gain_range_min = DM_DIG_MIN;
dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX;
dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
}
static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
long rssi_val_min = 0;
if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
(dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) {
if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
rssi_val_min =
(rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
rtlpriv->dm.undecorated_smoothed_pwdb) ?
rtlpriv->dm.undecorated_smoothed_pwdb :
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
else
rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
} else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT ||
dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
} else if (dm_digtable.curmultista_connectstate ==
DIG_MULTISTA_CONNECT) {
rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
}
return (u8) rssi_val_min;
}
/*
 * Read the OFDM and CCK false-alarm counters from the baseband PHY
 * counter registers, accumulate the per-type and overall totals used
 * by the DIG algorithm, and then reset the hardware counters for the
 * next measurement interval.
 */
static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
{
	u32 ret_value;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);

	/* OFDM failures: parity, illegal rate, CRC8 and MCS */
	ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
	falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);

	ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
	falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
	falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);

	ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
	falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
	falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
	    falsealm_cnt->cnt_rate_illegal +
	    falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail;

	/* CCK failure count, low byte then high byte (BIT(14) write
	 * presumably latches the counter for readout -- confirm against
	 * the chip documentation). */
	rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);

	ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
	falsealm_cnt->cnt_cck_fail = ret_value;

	ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
	falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;

	falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail +
				 falsealm_cnt->cnt_rate_illegal +
				 falsealm_cnt->cnt_crc8_fail +
				 falsealm_cnt->cnt_mcs_fail +
				 falsealm_cnt->cnt_cck_fail);

	/* Toggle the OFDM counter reset bit, then reset and re-enable
	 * the CCK counter field. */
	rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
	rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
	rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
	rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
		 "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
		 falsealm_cnt->cnt_parity_fail,
		 falsealm_cnt->cnt_rate_illegal,
		 falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
		 "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
		 falsealm_cnt->cnt_ofdm_fail,
		 falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
}
static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 value_igi = dm_digtable.cur_igvalue;
if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
value_igi--;
else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)
value_igi += 0;
else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2)
value_igi++;
else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2)
value_igi += 2;
if (value_igi > DM_DIG_FA_UPPER)
value_igi = DM_DIG_FA_UPPER;
else if (value_igi < DM_DIG_FA_LOWER)
value_igi = DM_DIG_FA_LOWER;
if (rtlpriv->falsealm_cnt.cnt_all > 10000)
value_igi = 0x32;
dm_digtable.cur_igvalue = value_igi;
rtl92c_dm_write_dig(hw);
}
static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) {
if ((dm_digtable.backoff_val - 2) <
dm_digtable.backoff_val_range_min)
dm_digtable.backoff_val =
dm_digtable.backoff_val_range_min;
else
dm_digtable.backoff_val -= 2;
} else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) {
if ((dm_digtable.backoff_val + 2) >
dm_digtable.backoff_val_range_max)
dm_digtable.backoff_val =
dm_digtable.backoff_val_range_max;
else
dm_digtable.backoff_val += 2;
}
if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) >
dm_digtable.rx_gain_range_max)
dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max;
else if ((dm_digtable.rssi_val_min + 10 -
dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min;
else
dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 -
dm_digtable.backoff_val;
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
"rssi_val_min = %x backoff_val %x\n",
dm_digtable.rssi_val_min, dm_digtable.backoff_val);
rtl92c_dm_write_dig(hw);
}
/*
 * DIG handling for the multi-station (ad-hoc) case.  Steps the
 * external port stage from the minimum per-entry smoothed PWDB,
 * resetting the initial gain to 0x20 on topology changes.  The static
 * 'initialized' flag latches whether the one-time 0x20 reset has been
 * applied for the current ad-hoc session.
 */
static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
{
	static u8 initialized; /* initialized to false */
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
	bool multi_sta = false;

	if (mac->opmode == NL80211_IFTYPE_ADHOC)
		multi_sta = true;

	/* Not ad-hoc, or own-STA DIG is active: multi-STA handling does
	 * not apply -- reset and leave. */
	if (!multi_sta ||
	    dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) {
		initialized = false;
		dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
		return;
	} else if (initialized == false) {
		/* First pass for this session: start from stage 0, IGI 0x20 */
		initialized = true;
		dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
		dm_digtable.cur_igvalue = 0x20;
		rtl92c_dm_write_dig(hw);
	}

	if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) {
		/* Weak peers: fall back to stage 1 (resetting the gain
		 * when leaving stage 2); strong peers: stage 2 with
		 * false-alarm driven gain control. */
		if ((rssi_strength < dm_digtable.rssi_lowthresh) &&
		    (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
			if (dm_digtable.dig_ext_port_stage ==
			    DIG_EXT_PORT_STAGE_2) {
				dm_digtable.cur_igvalue = 0x20;
				rtl92c_dm_write_dig(hw);
			}
			dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
		} else if (rssi_strength > dm_digtable.rssi_highthresh) {
			dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
			rtl92c_dm_ctrl_initgain_by_fa(hw);
		}
	} else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
		dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
		dm_digtable.cur_igvalue = 0x20;
		rtl92c_dm_write_dig(hw);
	}

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
		 "curmultista_connectstate = %x dig_ext_port_stage %x\n",
		 dm_digtable.curmultista_connectstate,
		 dm_digtable.dig_ext_port_stage);
}
static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
"presta_connectstate = %x, cursta_connectctate = %x\n",
dm_digtable.presta_connectstate,
dm_digtable.cursta_connectctate);
if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate
|| dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT
|| dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) {
dm_digtable.rssi_val_min =
rtl92c_dm_initial_gain_min_pwdb(hw);
rtl92c_dm_ctrl_initgain_by_rssi(hw);
}
} else {
dm_digtable.rssi_val_min = 0;
dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
dm_digtable.cur_igvalue = 0x20;
dm_digtable.pre_igvalue = 0;
rtl92c_dm_write_dig(hw);
}
}
/*
 * Adjust the CCK packet-detection (PD) stage from the current minimum
 * RSSI, with hysteresis: the low/high switch point is 25 when already
 * in the low-RSSI stage, 20 otherwise.  On a stage change the CCK CCA
 * byte and related baseband registers are reprogrammed; in the
 * low-RSSI stage the CCA value additionally tracks the CCK
 * false-alarm level (> 800 failures selects the high-FA setting).
 */
static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
		dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);

		/* Hysteresis: threshold 25 in low-RSSI stage, else 20 */
		if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
			if (dm_digtable.rssi_val_min <= 25)
				dm_digtable.cur_cck_pd_state =
				    CCK_PD_STAGE_LowRssi;
			else
				dm_digtable.cur_cck_pd_state =
				    CCK_PD_STAGE_HighRssi;
		} else {
			if (dm_digtable.rssi_val_min <= 20)
				dm_digtable.cur_cck_pd_state =
				    CCK_PD_STAGE_LowRssi;
			else
				dm_digtable.cur_cck_pd_state =
				    CCK_PD_STAGE_HighRssi;
		}
	} else {
		dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
	}

	/* Only touch hardware when the PD stage actually changed */
	if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) {
		if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
			/* CCA value follows CCK false-alarm pressure */
			if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
				dm_digtable.cur_cck_fa_state =
				    CCK_FA_STAGE_High;
			else
				dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low;

			if (dm_digtable.pre_cck_fa_state !=
			    dm_digtable.cur_cck_fa_state) {
				if (dm_digtable.cur_cck_fa_state ==
				    CCK_FA_STAGE_Low)
					rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
						      0x83);
				else
					rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
						      0xcd);
				dm_digtable.pre_cck_fa_state =
				    dm_digtable.cur_cck_fa_state;
			}

			rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);

			if (IS_92C_SERIAL(rtlhal->version))
				rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
					      MASKBYTE2, 0xd7);
		} else {
			rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
			rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);

			if (IS_92C_SERIAL(rtlhal->version))
				rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
					      MASKBYTE2, 0xd3);
		}
		dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state;
	}

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "CCKPDStage=%x\n",
		 dm_digtable.cur_cck_pd_state);

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "is92C=%x\n",
		 IS_92C_SERIAL(rtlhal->version));
}
/*
 * Top-level DIG step: derive the current STA connect state from the
 * MAC link state, run the single-STA, multi-STA and CCK PD handlers in
 * turn, and remember the state for the next iteration.  Skipped
 * entirely while scanning.
 */
static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
{
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));

	if (mac->act_scanning)
		return;

	dm_digtable.cursta_connectctate =
	    (mac->link_state >= MAC80211_LINKED) ?
	    DIG_STA_CONNECT : DIG_STA_DISCONNECT;

	rtl92c_dm_initial_gain_sta(hw);
	rtl92c_dm_initial_gain_multi_sta(hw);
	rtl92c_dm_cck_packet_detection_thresh(hw);

	dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate;
}
/*
 * DIG entry point: run the two-port initial-gain control, but only
 * when both the driver-level and table-level enable flags are set.
 */
static void rtl92c_dm_dig(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	if (!rtlpriv->dm.dm_initialgain_enable ||
	    !dm_digtable.dig_enable_flag)
		return;

	rtl92c_dm_ctrl_initgain_by_twoport(hw);
}
/*
 * Initialize dynamic TX power state: disabled, with both the current
 * and last power levels at the normal setting.
 */
static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm.dynamic_txpower_enable = false;
	rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
	rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
}
void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
dm_digtable.cur_igvalue, dm_digtable.pre_igvalue,
dm_digtable.backoff_val);
dm_digtable.cur_igvalue += 2;
if (dm_digtable.cur_igvalue > 0x3f)
dm_digtable.cur_igvalue = 0x3f;
if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) {
rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
dm_digtable.cur_igvalue);
rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
dm_digtable.cur_igvalue);
dm_digtable.pre_igvalue = dm_digtable.cur_igvalue;
}
}
EXPORT_SYMBOL(rtl92c_dm_write_dig);
/*
 * Would report the min/max smoothed PWDB values and the local RSSI to
 * firmware via an H2C command.
 *
 * NOTE(review): the bare 'return' below makes everything after it dead
 * code -- the per-station scan that would fill tmpentry_{max,min}_pwdb
 * is missing, so the function is effectively a stub.  Kept as-is;
 * confirm against the reference driver before removing or enabling.
 */
static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;

	u8 h2c_parameter[3] = { 0 };

	return;

	/* ---- unreachable below this point (see note above) ---- */
	if (tmpentry_max_pwdb != 0) {
		rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb =
		    tmpentry_max_pwdb;
	} else {
		rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0;
	}

	if (tmpentry_min_pwdb != 0xff) {
		rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb =
		    tmpentry_min_pwdb;
	} else {
		rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0;
	}

	h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
	h2c_parameter[0] = 0;

	rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
}
/*
 * Reset the EDCA-turbo state: turbo off, no non-BE traffic seen, and
 * not currently in the rx-dominant (downlink) parameter state.
 */
void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm.is_cur_rdlstate = false;
	rtlpriv->dm.is_any_nonbepkts = false;
	rtlpriv->dm.current_turbo_edca = false;
}
EXPORT_SYMBOL(rtl92c_dm_init_edca_turbo);
/*
 * EDCA turbo: while associated and carrying (mostly) BE traffic, swap
 * the BE EDCA parameter register between an uplink- and a
 * downlink-optimized value depending on which direction dominates
 * (rx > 4*tx selects downlink).  BT coexistence may override either
 * value; when turbo no longer applies, the normal AC parameters are
 * restored through set_hw_reg(HW_VAR_AC_PARAM).
 *
 * The static locals carry the previous byte counters and BT overrides
 * across invocations.
 */
static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));

	static u64 last_txok_cnt;
	static u64 last_rxok_cnt;
	static u32 last_bt_edca_ul;
	static u32 last_bt_edca_dl;

	u64 cur_txok_cnt = 0;
	u64 cur_rxok_cnt = 0;
	u32 edca_be_ul = 0x5ea42b;
	u32 edca_be_dl = 0x5ea42b;
	bool bt_change_edca = false;

	/* If BT coexistence changed either override, drop out of turbo
	 * so the new parameters get reprogrammed below. */
	if ((last_bt_edca_ul != rtlpcipriv->bt_coexist.bt_edca_ul) ||
	    (last_bt_edca_dl != rtlpcipriv->bt_coexist.bt_edca_dl)) {
		rtlpriv->dm.current_turbo_edca = false;
		last_bt_edca_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
		last_bt_edca_dl = rtlpcipriv->bt_coexist.bt_edca_dl;
	}

	if (rtlpcipriv->bt_coexist.bt_edca_ul != 0) {
		edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
		bt_change_edca = true;
	}

	if (rtlpcipriv->bt_coexist.bt_edca_dl != 0) {
		/* Bug fix: this previously assigned edca_be_ul, clobbering
		 * the uplink value and leaving the downlink override
		 * unused (matches the upstream kernel fix). */
		edca_be_dl = rtlpcipriv->bt_coexist.bt_edca_dl;
		bt_change_edca = true;
	}

	/* EDCA turbo only applies while associated. */
	if (mac->link_state != MAC80211_LINKED) {
		rtlpriv->dm.current_turbo_edca = false;
		return;
	}

	if ((!mac->ht_enable) && (!rtlpcipriv->bt_coexist.bt_coexistence)) {
		if (!(edca_be_ul & 0xffff0000))
			edca_be_ul |= 0x005e0000;

		if (!(edca_be_dl & 0xffff0000))
			edca_be_dl |= 0x005e0000;
	}

	/* Turbo is allowed when BT changed the parameters, or when only
	 * BE traffic was seen and frame bursting is not disabled. */
	if ((bt_change_edca) || ((!rtlpriv->dm.is_any_nonbepkts) &&
	    (!rtlpriv->dm.disable_framebursting))) {

		cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
		cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;

		if (cur_rxok_cnt > 4 * cur_txok_cnt) {
			/* rx-dominant: program downlink parameters once */
			if (!rtlpriv->dm.is_cur_rdlstate ||
			    !rtlpriv->dm.current_turbo_edca) {
				rtl_write_dword(rtlpriv,
						REG_EDCA_BE_PARAM,
						edca_be_dl);
				rtlpriv->dm.is_cur_rdlstate = true;
			}
		} else {
			/* tx-dominant or balanced: uplink parameters */
			if (rtlpriv->dm.is_cur_rdlstate ||
			    !rtlpriv->dm.current_turbo_edca) {
				rtl_write_dword(rtlpriv,
						REG_EDCA_BE_PARAM,
						edca_be_ul);
				rtlpriv->dm.is_cur_rdlstate = false;
			}
		}

		rtlpriv->dm.current_turbo_edca = true;
	} else {
		/* Leaving turbo: restore the standard BE AC parameters */
		if (rtlpriv->dm.current_turbo_edca) {
			u8 tmp = AC0_BE;

			rtlpriv->cfg->ops->set_hw_reg(hw,
						      HW_VAR_AC_PARAM,
						      (u8 *) (&tmp));
			rtlpriv->dm.current_turbo_edca = false;
		}
	}

	rtlpriv->dm.is_any_nonbepkts = false;
	last_txok_cnt = rtlpriv->stats.txbytesunicast;
	last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
}
/*
 * Thermal-meter based TX power tracking.  Reads the RF thermal meter,
 * compares it with the values recorded at init and at the last
 * LCK/IQK calibrations, steps the OFDM/CCK swing indexes by the
 * thermal delta, and reprograms the baseband TX IQ-imbalance and CCK
 * filter registers accordingly.  A thermal drift also re-triggers LC
 * calibration (delta_lck > 1) and IQ calibration (delta_iqk > 3).
 *
 * Bug fix versus the previous revision: the path-B table scan now
 * records the matching index in ofdm_index_old[1]; before, that slot
 * was never written, so on 2T hardware rtlpriv->dm.ofdm_index[1] was
 * seeded from uninitialized stack data.
 */
static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
							     *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 thermalvalue, delta, delta_lck, delta_iqk;
	long ele_a, ele_d, temp_cck, val_x, value32;
	long val_y, ele_c = 0;
	u8 ofdm_index[2], cck_index = 0, ofdm_index_old[2], cck_index_old = 0;
	int i;
	bool is2t = IS_92C_SERIAL(rtlhal->version);
	s8 txpwr_level[2] = {0, 0};
	u8 ofdm_min_index = 6, rf;

	rtlpriv->dm.txpower_trackinginit = true;
	RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
		 "rtl92c_dm_txpower_tracking_callback_thermalmeter\n");

	thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);

	RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
		 "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
		 thermalvalue, rtlpriv->dm.thermalvalue,
		 rtlefuse->eeprom_thermalmeter);

	rtl92c_phy_ap_calibrate(hw, (thermalvalue -
				     rtlefuse->eeprom_thermalmeter));

	if (is2t)
		rf = 2;
	else
		rf = 1;

	if (thermalvalue) {
		/* Locate the current path-A OFDM swing index from the
		 * readback D element of the IQ-imbalance register. */
		ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
				      MASKDWORD) & MASKOFDM_D;

		for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
			if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
				ofdm_index_old[0] = (u8) i;
				RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
					 "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
					 ROFDM0_XATXIQIMBALANCE,
					 ele_d, ofdm_index_old[0]);
				break;
			}
		}

		if (is2t) {
			ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
					      MASKDWORD) & MASKOFDM_D;

			for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
				if (ele_d == (ofdmswing_table[i] &
				    MASKOFDM_D)) {
					/* Bug fix: record the index --
					 * previously never assigned, leaving
					 * ofdm_index_old[1] uninitialized. */
					ofdm_index_old[1] = (u8) i;

					RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
						 DBG_LOUD,
						 "Initial pathB ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
						 ROFDM0_XBTXIQIMBALANCE, ele_d,
						 ofdm_index_old[1]);
					break;
				}
			}
		}

		/* Locate the current CCK swing index by matching the
		 * readback TX filter coefficients. */
		temp_cck =
		    rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK;

		for (i = 0; i < CCK_TABLE_LENGTH; i++) {
			if (rtlpriv->dm.cck_inch14) {
				if (memcmp((void *)&temp_cck,
					   (void *)&cckswing_table_ch14[i][2],
					   4) == 0) {
					cck_index_old = (u8) i;

					RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
						 DBG_LOUD,
						 "Initial reg0x%x = 0x%lx, cck_index=0x%x, ch 14 %d\n",
						 RCCK0_TXFILTER2, temp_cck,
						 cck_index_old,
						 rtlpriv->dm.cck_inch14);
					break;
				}
			} else {
				if (memcmp((void *)&temp_cck,
					   (void *)
					   &cckswing_table_ch1ch13[i][2],
					   4) == 0) {
					cck_index_old = (u8) i;

					RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
						 DBG_LOUD,
						 "Initial reg0x%x = 0x%lx, cck_index=0x%x, ch14 %d\n",
						 RCCK0_TXFILTER2, temp_cck,
						 cck_index_old,
						 rtlpriv->dm.cck_inch14);
					break;
				}
			}
		}

		/* First run after reset: remember the baseline thermal
		 * value and the indexes read back from hardware. */
		if (!rtlpriv->dm.thermalvalue) {
			rtlpriv->dm.thermalvalue =
			    rtlefuse->eeprom_thermalmeter;
			rtlpriv->dm.thermalvalue_lck = thermalvalue;
			rtlpriv->dm.thermalvalue_iqk = thermalvalue;

			for (i = 0; i < rf; i++)
				rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
			rtlpriv->dm.cck_index = cck_index_old;
		}

		/* Absolute thermal deltas vs. last tracking run and vs.
		 * the last LCK/IQK calibrations. */
		delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
		    (thermalvalue - rtlpriv->dm.thermalvalue) :
		    (rtlpriv->dm.thermalvalue - thermalvalue);

		delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
		    (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
		    (rtlpriv->dm.thermalvalue_lck - thermalvalue);

		delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
		    (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
		    (rtlpriv->dm.thermalvalue_iqk - thermalvalue);

		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
			 "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
			 thermalvalue, rtlpriv->dm.thermalvalue,
			 rtlefuse->eeprom_thermalmeter, delta, delta_lck,
			 delta_iqk);

		if (delta_lck > 1) {
			rtlpriv->dm.thermalvalue_lck = thermalvalue;
			rtl92c_phy_lc_calibrate(hw);
		}

		if (delta > 0 && rtlpriv->dm.txpower_track_control) {
			/* Step the stored indexes by the thermal delta:
			 * hotter than last run -> lower index. */
			if (thermalvalue > rtlpriv->dm.thermalvalue) {
				for (i = 0; i < rf; i++)
					rtlpriv->dm.ofdm_index[i] -= delta;
				rtlpriv->dm.cck_index -= delta;
			} else {
				for (i = 0; i < rf; i++)
					rtlpriv->dm.ofdm_index[i] += delta;
				rtlpriv->dm.cck_index += delta;
			}

			if (is2t) {
				RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
					 "temp OFDM_A_index=0x%x, OFDM_B_index=0x%x, cck_index=0x%x\n",
					 rtlpriv->dm.ofdm_index[0],
					 rtlpriv->dm.ofdm_index[1],
					 rtlpriv->dm.cck_index);
			} else {
				RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
					 "temp OFDM_A_index=0x%x, cck_index=0x%x\n",
					 rtlpriv->dm.ofdm_index[0],
					 rtlpriv->dm.cck_index);
			}

			/* Working copies, +1 when hotter than the eeprom
			 * reference value. */
			if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
				for (i = 0; i < rf; i++)
					ofdm_index[i] =
					    rtlpriv->dm.ofdm_index[i]
					    + 1;
				cck_index = rtlpriv->dm.cck_index + 1;
			} else {
				for (i = 0; i < rf; i++)
					ofdm_index[i] =
					    rtlpriv->dm.ofdm_index[i];
				cck_index = rtlpriv->dm.cck_index;
			}

			/* Per-path OFDM correction by TX power level band */
			for (i = 0; i < rf; i++) {
				if (txpwr_level[i] >= 0 &&
				    txpwr_level[i] <= 26) {
					if (thermalvalue >
					    rtlefuse->eeprom_thermalmeter) {
						if (delta < 5)
							ofdm_index[i] -= 1;
						else
							ofdm_index[i] -= 2;
					} else if (delta > 5 && thermalvalue <
						   rtlefuse->
						   eeprom_thermalmeter) {
						ofdm_index[i] += 1;
					}
				} else if (txpwr_level[i] >= 27 &&
					   txpwr_level[i] <= 32
					   && thermalvalue >
					   rtlefuse->eeprom_thermalmeter) {
					if (delta < 5)
						ofdm_index[i] -= 1;
					else
						ofdm_index[i] -= 2;
				} else if (txpwr_level[i] >= 32 &&
					   txpwr_level[i] <= 38 &&
					   thermalvalue >
					   rtlefuse->eeprom_thermalmeter
					   && delta > 5) {
					ofdm_index[i] -= 1;
				}
			}

			/* NOTE(review): here i == rf, so txpwr_level[i]
			 * reads one element past the end of txpwr_level[2]
			 * when rf == 2.  txpwr_level is all zeros and never
			 * written, so the first branch is clearly the
			 * intended one; kept as-is to match the reference
			 * driver, but this should be cleaned up upstream. */
			if (txpwr_level[i] >= 0 && txpwr_level[i] <= 26) {
				if (thermalvalue >
				    rtlefuse->eeprom_thermalmeter) {
					if (delta < 5)
						cck_index -= 1;
					else
						cck_index -= 2;
				} else if (delta > 5 && thermalvalue <
					   rtlefuse->eeprom_thermalmeter) {
					cck_index += 1;
				}
			} else if (txpwr_level[i] >= 27 &&
				   txpwr_level[i] <= 32 &&
				   thermalvalue >
				   rtlefuse->eeprom_thermalmeter) {
				if (delta < 5)
					cck_index -= 1;
				else
					cck_index -= 2;
			} else if (txpwr_level[i] >= 32 &&
				   txpwr_level[i] <= 38 &&
				   thermalvalue > rtlefuse->eeprom_thermalmeter
				   && delta > 5) {
				cck_index -= 1;
			}

			/* Clamp the working indexes to their table ranges */
			for (i = 0; i < rf; i++) {
				if (ofdm_index[i] > OFDM_TABLE_SIZE - 1)
					ofdm_index[i] = OFDM_TABLE_SIZE - 1;
				else if (ofdm_index[i] < ofdm_min_index)
					ofdm_index[i] = ofdm_min_index;
			}

			if (cck_index > CCK_TABLE_SIZE - 1)
				cck_index = CCK_TABLE_SIZE - 1;
			else if (cck_index < 0)
				cck_index = 0;

			if (is2t) {
				RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
					 "new OFDM_A_index=0x%x, OFDM_B_index=0x%x, cck_index=0x%x\n",
					 ofdm_index[0], ofdm_index[1],
					 cck_index);
			} else {
				RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
					 "new OFDM_A_index=0x%x, cck_index=0x%x\n",
					 ofdm_index[0], cck_index);
			}
		}

		if (rtlpriv->dm.txpower_track_control && delta != 0) {
			/* Path A: scale the IQK results by the new swing
			 * D element and program the IQ-imbalance regs. */
			ele_d =
			    (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
			val_x = rtlphy->reg_e94;
			val_y = rtlphy->reg_e9c;

			if (val_x != 0) {
				/* sign-extend the 10-bit IQK values */
				if ((val_x & 0x00000200) != 0)
					val_x = val_x | 0xFFFFFC00;
				ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;

				if ((val_y & 0x00000200) != 0)
					val_y = val_y | 0xFFFFFC00;
				ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;

				value32 = (ele_d << 22) |
				    ((ele_c & 0x3F) << 16) | ele_a;

				rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
					      MASKDWORD, value32);

				value32 = (ele_c & 0x000003C0) >> 6;
				rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
					      value32);

				value32 = ((val_x * ele_d) >> 7) & 0x01;
				rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
					      BIT(31), value32);

				value32 = ((val_y * ele_d) >> 7) & 0x01;
				rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
					      BIT(29), value32);
			} else {
				/* No IQK data: write the raw table value */
				rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
					      MASKDWORD,
					      ofdmswing_table[ofdm_index[0]]);

				rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
					      0x00);
				rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
					      BIT(31) | BIT(29), 0x00);
			}

			/* CCK: write the 8 filter coefficients for the
			 * channel-appropriate table row. */
			if (!rtlpriv->dm.cck_inch14) {
				rtl_write_byte(rtlpriv, 0xa22,
					       cckswing_table_ch1ch13[cck_index]
					       [0]);
				rtl_write_byte(rtlpriv, 0xa23,
					       cckswing_table_ch1ch13[cck_index]
					       [1]);
				rtl_write_byte(rtlpriv, 0xa24,
					       cckswing_table_ch1ch13[cck_index]
					       [2]);
				rtl_write_byte(rtlpriv, 0xa25,
					       cckswing_table_ch1ch13[cck_index]
					       [3]);
				rtl_write_byte(rtlpriv, 0xa26,
					       cckswing_table_ch1ch13[cck_index]
					       [4]);
				rtl_write_byte(rtlpriv, 0xa27,
					       cckswing_table_ch1ch13[cck_index]
					       [5]);
				rtl_write_byte(rtlpriv, 0xa28,
					       cckswing_table_ch1ch13[cck_index]
					       [6]);
				rtl_write_byte(rtlpriv, 0xa29,
					       cckswing_table_ch1ch13[cck_index]
					       [7]);
			} else {
				rtl_write_byte(rtlpriv, 0xa22,
					       cckswing_table_ch14[cck_index]
					       [0]);
				rtl_write_byte(rtlpriv, 0xa23,
					       cckswing_table_ch14[cck_index]
					       [1]);
				rtl_write_byte(rtlpriv, 0xa24,
					       cckswing_table_ch14[cck_index]
					       [2]);
				rtl_write_byte(rtlpriv, 0xa25,
					       cckswing_table_ch14[cck_index]
					       [3]);
				rtl_write_byte(rtlpriv, 0xa26,
					       cckswing_table_ch14[cck_index]
					       [4]);
				rtl_write_byte(rtlpriv, 0xa27,
					       cckswing_table_ch14[cck_index]
					       [5]);
				rtl_write_byte(rtlpriv, 0xa28,
					       cckswing_table_ch14[cck_index]
					       [6]);
				rtl_write_byte(rtlpriv, 0xa29,
					       cckswing_table_ch14[cck_index]
					       [7]);
			}

			/* Path B (2T parts only): same procedure with the
			 * B-path IQK results and registers. */
			if (is2t) {
				ele_d = (ofdmswing_table[ofdm_index[1]] &
					 0xFFC00000) >> 22;

				val_x = rtlphy->reg_eb4;
				val_y = rtlphy->reg_ebc;

				if (val_x != 0) {
					if ((val_x & 0x00000200) != 0)
						val_x = val_x | 0xFFFFFC00;
					ele_a = ((val_x * ele_d) >> 8) &
					    0x000003FF;

					if ((val_y & 0x00000200) != 0)
						val_y = val_y | 0xFFFFFC00;
					ele_c = ((val_y * ele_d) >> 8) &
					    0x00003FF;

					value32 = (ele_d << 22) |
					    ((ele_c & 0x3F) << 16) | ele_a;
					rtl_set_bbreg(hw,
						      ROFDM0_XBTXIQIMBALANCE,
						      MASKDWORD, value32);

					value32 = (ele_c & 0x000003C0) >> 6;
					rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
						      MASKH4BITS, value32);

					value32 = ((val_x * ele_d) >> 7) & 0x01;
					rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
						      BIT(27), value32);

					value32 = ((val_y * ele_d) >> 7) & 0x01;
					rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
						      BIT(25), value32);
				} else {
					rtl_set_bbreg(hw,
						      ROFDM0_XBTXIQIMBALANCE,
						      MASKDWORD,
						      ofdmswing_table[ofdm_index
						      [1]]);

					rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
						      MASKH4BITS, 0x00);
					rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
						      BIT(27) | BIT(25), 0x00);
				}
			}
		}

		if (delta_iqk > 3) {
			rtlpriv->dm.thermalvalue_iqk = thermalvalue;
			rtl92c_phy_iq_calibrate(hw, false);
		}

		if (rtlpriv->dm.txpower_track_control)
			rtlpriv->dm.thermalvalue = thermalvalue;
	}

	RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
}
/* Enable thermal-meter based TX power tracking; the tracking state is
 * (re)initialised lazily by the callback on its first run. */
static void rtl92c_dm_initialize_txpower_tracking_thermalmeter(
				struct ieee80211_hw *hw)
{
	struct rtl_priv *priv = rtl_priv(hw);

	priv->dm.txpower_trackinginit = false;
	priv->dm.txpower_tracking = true;
	RT_TRACE(priv, COMP_POWER_TRACKING, DBG_LOUD,
		 "pMgntInfo->txpower_tracking = %d\n",
		 priv->dm.txpower_tracking);
}
/* Indirection kept for parity with the other rtl92c dm init helpers;
 * only the thermal-meter implementation exists for this chip. */
static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
{
	rtl92c_dm_initialize_txpower_tracking_thermalmeter(hw);
}
/* Run the TX power tracking callback synchronously (no deferral). */
static void rtl92c_dm_txpower_tracking_directcall(struct ieee80211_hw *hw)
{
	rtl92c_dm_txpower_tracking_callback_thermalmeter(hw);
}
/*
 * Two-phase thermal-meter poll: one watchdog round arms a measurement
 * on RF path A, the next round consumes it by running the tracking
 * callback.  The phase is kept in a function-local static.
 */
static void rtl92c_dm_check_txpower_tracking_thermal_meter(
				struct ieee80211_hw *hw)
{
	struct rtl_priv *priv = rtl_priv(hw);
	static u8 meter_armed;

	if (!priv->dm.txpower_tracking)
		return;

	if (meter_armed) {
		/* Measurement was started last round: consume it now. */
		RT_TRACE(priv, COMP_POWER_TRACKING, DBG_LOUD,
			 "Schedule TxPowerTracking direct call!!\n");
		rtl92c_dm_txpower_tracking_directcall(hw);
		meter_armed = 0;
	} else {
		/* Kick off a thermal-meter measurement on RF path A. */
		rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK,
			      0x60);
		RT_TRACE(priv, COMP_POWER_TRACKING, DBG_LOUD,
			 "Trigger 92S Thermal Meter!!\n");
		meter_armed = 1;
	}
}
/* Public entry point used by the DM watchdog; delegates to the
 * thermal-meter implementation. */
void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw)
{
	rtl92c_dm_check_txpower_tracking_thermal_meter(hw);
}
EXPORT_SYMBOL(rtl92c_dm_check_txpower_tracking);
/*
 * Reset the rate-adaptive state machine and decide whether the driver
 * (as opposed to firmware) owns the rate-adaptive mask.
 */
void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rate_adaptive *ra = &rtlpriv->ra;

	ra->ratr_state = DM_RATR_STA_INIT;
	ra->pre_ratr_state = DM_RATR_STA_INIT;
	/* Driver-managed DM implies the driver also manages the mask. */
	rtlpriv->dm.useramask = (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER);
}
EXPORT_SYMBOL(rtl92c_dm_init_rate_adaptive_mask);
/*
 * Re-evaluate the rate-adaptive RSSI level (high/middle/low) for a
 * STA-mode link and push a new rate mask to the firmware when the
 * level changed.  The threshold pair depends on the previous level,
 * which gives the classification hysteresis.
 */
static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rate_adaptive *p_ra = &(rtlpriv->ra);
	u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
	struct ieee80211_sta *sta = NULL;

	/* Nothing to do while the HAL is being torn down. */
	if (is_hal_stop(rtlhal)) {
		RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
			 "<---- driver is going to unload\n");
		return;
	}
	if (!rtlpriv->dm.useramask) {
		RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
			 "<---- driver does not control rate adaptive mask\n");
		return;
	}
	if (mac->link_state == MAC80211_LINKED &&
	    mac->opmode == NL80211_IFTYPE_STATION) {
		/* Hysteresis thresholds keyed on the previous level. */
		switch (p_ra->pre_ratr_state) {
		case DM_RATR_STA_HIGH:
			high_rssithresh_for_ra = 50;
			low_rssithresh_for_ra = 20;
			break;
		case DM_RATR_STA_MIDDLE:
			high_rssithresh_for_ra = 55;
			low_rssithresh_for_ra = 20;
			break;
		case DM_RATR_STA_LOW:
			high_rssithresh_for_ra = 50;
			low_rssithresh_for_ra = 25;
			break;
		default:
			high_rssithresh_for_ra = 50;
			low_rssithresh_for_ra = 20;
			break;
		}

		if (rtlpriv->dm.undecorated_smoothed_pwdb >
		    (long)high_rssithresh_for_ra)
			p_ra->ratr_state = DM_RATR_STA_HIGH;
		else if (rtlpriv->dm.undecorated_smoothed_pwdb >
			 (long)low_rssithresh_for_ra)
			p_ra->ratr_state = DM_RATR_STA_MIDDLE;
		else
			p_ra->ratr_state = DM_RATR_STA_LOW;

		if (p_ra->pre_ratr_state != p_ra->ratr_state) {
			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "RSSI = %ld\n",
				 rtlpriv->dm.undecorated_smoothed_pwdb);
			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
				 "RSSI_LEVEL = %d\n", p_ra->ratr_state);
			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
				 "PreState = %d, CurState = %d\n",
				 p_ra->pre_ratr_state, p_ra->ratr_state);
			/* Only the PCI card uses sta in the update rate table
			 * callback routine; the RCU read lock is held across
			 * the callback so the sta pointer stays valid. */
			if (rtlhal->interface == INTF_PCI) {
				rcu_read_lock();
				sta = ieee80211_find_sta(mac->vif, mac->bssid);
			}
			rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
							   p_ra->ratr_state);

			p_ra->pre_ratr_state = p_ra->ratr_state;
			if (rtlhal->interface == INTF_PCI)
				rcu_read_unlock();
		}
	}
}
/*
 * Reset the dynamic BB power-saving bookkeeping to the "unknown"
 * sentinel states.  dm_pstable is shared power-saving state defined
 * elsewhere in this file and consumed by rtl92c_dm_rf_saving().
 */
static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
{
	dm_pstable.pre_ccastate = CCA_MAX;
	dm_pstable.cur_ccasate = CCA_MAX;
	dm_pstable.pre_rfstate = RF_MAX;
	dm_pstable.cur_rfstate = RF_MAX;
	dm_pstable.rssi_val_min = 0;
}
/*
 * RF power-save state machine.  On the very first call the original
 * values of four BB registers are latched so they can be restored when
 * leaving the save state.  dm_pstable.rssi_val_min drives the
 * RF_SAVE/RF_NORMAL decision with hysteresis (enter save at >= 30,
 * leave at <= 25); bforce_in_normal forces RF_NORMAL.
 */
void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
{
	static u8 initialize;
	static u32 reg_874, reg_c70, reg_85c, reg_a74;

	/* One-time capture of the registers clobbered in RF_SAVE mode. */
	if (initialize == 0) {
		reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
					 MASKDWORD) & 0x1CC000) >> 14;
		reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
					 MASKDWORD) & BIT(3)) >> 3;
		reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
					 MASKDWORD) & 0xFF000000) >> 24;
		reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12;
		initialize = 1;
	}
	if (!bforce_in_normal) {
		if (dm_pstable.rssi_val_min != 0) {
			/* Hysteresis: threshold depends on previous state. */
			if (dm_pstable.pre_rfstate == RF_NORMAL) {
				if (dm_pstable.rssi_val_min >= 30)
					dm_pstable.cur_rfstate = RF_SAVE;
				else
					dm_pstable.cur_rfstate = RF_NORMAL;
			} else {
				if (dm_pstable.rssi_val_min <= 25)
					dm_pstable.cur_rfstate = RF_NORMAL;
				else
					dm_pstable.cur_rfstate = RF_SAVE;
			}
		} else {
			/* No RSSI sample yet: state unknown. */
			dm_pstable.cur_rfstate = RF_MAX;
		}
	} else {
		dm_pstable.cur_rfstate = RF_NORMAL;
	}
	/* Program the hardware only on a state transition. */
	if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) {
		if (dm_pstable.cur_rfstate == RF_SAVE) {
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      0x1C0000, 0x2);
			rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
			rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
				      0xFF000000, 0x63);
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      0xC000, 0x2);
			rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3);
			/* 0x818[28] is toggled 0->1 here (presumably to
			 * latch the new settings - not documented here). */
			rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
			rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
		} else {
			/* Restore the values captured at first entry. */
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      0x1CC000, reg_874);
			rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
				      reg_c70);
			rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
				      reg_85c);
			rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74);
			rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
		}
		dm_pstable.pre_rfstate = dm_pstable.cur_rfstate;
	}
}
EXPORT_SYMBOL(rtl92c_dm_rf_saving);
/*
 * Feed the current minimum RSSI into dm_pstable and run the RF-saving
 * logic (92C dual-radio parts would use 1R CCA instead; that call is
 * stubbed out below).
 *
 * NOTE(review): the second RT_TRACE argument is normally a COMP_*
 * component mask; DBG_LOUD is passed in that slot throughout this
 * function - looks like a copy/paste slip, confirm against the
 * RT_TRACE definition.
 */
static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	if (((mac->link_state == MAC80211_NOLINK)) &&
	    (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
		dm_pstable.rssi_val_min = 0;
		RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "Not connected to any\n");
	}
	if (mac->link_state == MAC80211_LINKED) {
		/* Ad-hoc uses the weakest peer; STA uses its own RSSI. */
		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
			dm_pstable.rssi_val_min =
			    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
			RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
				 "AP Client PWDB = 0x%lx\n",
				 dm_pstable.rssi_val_min);
		} else {
			dm_pstable.rssi_val_min =
			    rtlpriv->dm.undecorated_smoothed_pwdb;
			RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
				 "STA Default Port PWDB = 0x%lx\n",
				 dm_pstable.rssi_val_min);
		}
	} else {
		dm_pstable.rssi_val_min =
		    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
		RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
			 "AP Ext Port PWDB = 0x%lx\n",
			 dm_pstable.rssi_val_min);
	}
	/* 1R CCA for 92C-series parts is intentionally disabled. */
	if (IS_92C_SERIAL(rtlhal->version))
		; /* rtl92c_dm_1r_cca(hw); */
	else
		rtl92c_dm_rf_saving(hw, false);
}
/*
 * Initialise all dynamic-mechanism (DM) sub-modules.  Called once at
 * HW bring-up; individual features are then driven by the watchdog.
 */
void rtl92c_dm_init(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
	rtl92c_dm_diginit(hw);
	rtl92c_dm_init_dynamic_txpower(hw);
	rtl92c_dm_init_edca_turbo(hw);
	rtl92c_dm_init_rate_adaptive_mask(hw);
	rtl92c_dm_initialize_txpower_tracking(hw);
	rtl92c_dm_init_dynamic_bb_powersaving(hw);
}
EXPORT_SYMBOL(rtl92c_dm_init);
/*
 * Pick a dynamic TX high-power level from the smoothed RSSI and
 * reprogram the TX power whenever the level changed since last run.
 */
void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	long undecorated_smoothed_pwdb;

	if (!rtlpriv->dm.dynamic_txpower_enable)
		return;

	/* High-power adjustment disabled by the HAL DM flags. */
	if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		return;
	}

	if ((mac->link_state < MAC80211_LINKED) &&
	    (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
			 "Not connected to any\n");
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
		return;
	}

	/* RSSI source: per-entry minimum in ad-hoc, own value in STA. */
	if (mac->link_state >= MAC80211_LINKED) {
		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
			undecorated_smoothed_pwdb =
			    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
				 "AP Client PWDB = 0x%lx\n",
				 undecorated_smoothed_pwdb);
		} else {
			undecorated_smoothed_pwdb =
			    rtlpriv->dm.undecorated_smoothed_pwdb;
			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
				 "STA Default Port PWDB = 0x%lx\n",
				 undecorated_smoothed_pwdb);
		}
	} else {
		undecorated_smoothed_pwdb =
		    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "AP Ext Port PWDB = 0x%lx\n",
			 undecorated_smoothed_pwdb);
	}

	/* NOTE(review): both of the first two RSSI bands select
	 * TXHIGHPWRLEVEL_LEVEL1 (only the trace text differs); other
	 * rtlwifi variants use a distinct level for the near band -
	 * confirm whether that is intended for this chip. */
	if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
	} else if ((undecorated_smoothed_pwdb <
		    (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
		   (undecorated_smoothed_pwdb >=
		    TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
	} else if (undecorated_smoothed_pwdb <
		   (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_NORMAL\n");
	}

	/* Reprogram the hardware only when the level actually moved. */
	if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "PHY_SetTxPowerLevel8192S() Channel = %d\n",
			 rtlphy->current_channel);
		rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
	}
	rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
}
/*
 * Periodic DM watchdog.  Runs the dynamic-mechanism features only
 * when the RF is on, firmware power-save is not active and no RF
 * change is in progress.
 */
void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	bool fw_current_inpsmode = false;
	bool fw_ps_awake = true;

	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
				      (u8 *) (&fw_current_inpsmode));
	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
				      (u8 *) (&fw_ps_awake));
	if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) &&
	     fw_ps_awake)
	    && (!ppsc->rfchange_inprogress)) {
		rtl92c_dm_pwdb_monitor(hw);
		rtl92c_dm_dig(hw);
		rtl92c_dm_false_alarm_counter_statistics(hw);
		rtl92c_dm_dynamic_bb_powersaving(hw);
		rtl92c_dm_dynamic_txpower(hw);
		rtl92c_dm_check_txpower_tracking(hw);
		rtl92c_dm_refresh_rate_adaptive_mask(hw);
		rtl92c_dm_bt_coexist(hw);
		rtl92c_dm_check_edca_turbo(hw);
	}
}
EXPORT_SYMBOL(rtl92c_dm_watchdog);
/*
 * Recompute the BT-coexistence RSSI flag word from the current link
 * RSSI and report whether it differs from the cached one.  Each flag
 * uses its own threshold band; the word is rebuilt from zero every
 * call, the clear operations are kept to mirror the threshold pairs.
 */
u8 rtl92c_bt_rssi_state_change(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	long pwdb;
	u8 state = 0x00;

	if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
		pwdb = GET_UNDECORATED_AVERAGE_RSSI(rtlpriv);
	} else {
		if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)
			pwdb = 100;
		else
			pwdb = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
	}

	/* HighPower/NormalPower decision for BT coexistence. */
	if (pwdb >= 67)
		state &= (~BT_RSSI_STATE_NORMAL_POWER);
	else if (pwdb < 62)
		state |= BT_RSSI_STATE_NORMAL_POWER;

	/* AMPDU on/off decision for BT coexistence. */
	if (pwdb >= 40)
		state &= (~BT_RSSI_STATE_AMDPU_OFF);
	else if (pwdb <= 32)
		state |= BT_RSSI_STATE_AMDPU_OFF;

	/* "Special low" marker consumed by later coexistence logic. */
	if (pwdb < 35)
		state |= BT_RSSI_STATE_SPECIAL_LOW;
	else
		state &= (~BT_RSSI_STATE_SPECIAL_LOW);

	/* TX power trim according to BT status. */
	if (pwdb >= 30)
		state |= BT_RSSI_STATE_TXPOWER_LOW;
	else if (pwdb < 25)
		state &= (~BT_RSSI_STATE_TXPOWER_LOW);

	/* BT_Idle handling in B/G mode. */
	if (pwdb < 15)
		state |= BT_RSSI_STATE_BG_EDCA_LOW;
	else
		state &= (~BT_RSSI_STATE_BG_EDCA_LOW);

	if (state == pcipriv->bt_coexist.bt_rssi_state)
		return false;

	pcipriv->bt_coexist.bt_rssi_state = state;
	return true;
}
EXPORT_SYMBOL(rtl92c_bt_rssi_state_change);
/*
 * Poll the BT activity registers and update the cached BT state and
 * service classification.  Returns true when something changed so the
 * caller re-runs the coexistence programming.
 *
 * Fixes vs. the previous version:
 *  - the "chip did not answer" all-ones check compared bt_tx/bt_pri
 *    AFTER masking them with 0x00ffffff, so it could never match;
 *    the raw values are now checked before masking;
 *  - a zero polling counter caused a division by zero when computing
 *    the traffic ratios; guard against it.
 */
static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	u32 polling, ratio_tx, ratio_pri;
	u32 bt_tx, bt_pri;
	u8 bt_state;
	u8 cur_service_type;

	if (rtlpriv->mac80211.link_state < MAC80211_LINKED)
		return false;

	bt_state = rtl_read_byte(rtlpriv, 0x4fd);
	bt_tx = rtl_read_dword(rtlpriv, 0x488);
	bt_pri = rtl_read_dword(rtlpriv, 0x48c);
	polling = rtl_read_dword(rtlpriv, 0x490);

	/* All-ones reads mean the chip did not answer; check the raw
	 * values before masking, otherwise this can never trigger. */
	if (bt_tx == 0xffffffff && bt_pri == 0xffffffff &&
	    polling == 0xffffffff && bt_state == 0xff)
		return false;

	bt_tx &= 0x00ffffff;
	bt_pri &= 0x00ffffff;

	bt_state &= BIT_OFFSET_LEN_MASK_32(0, 1);
	if (bt_state != rtlpcipriv->bt_coexist.bt_cur_state) {
		rtlpcipriv->bt_coexist.bt_cur_state = bt_state;

		if (rtlpcipriv->bt_coexist.reg_bt_sco == 3) {
			rtlpcipriv->bt_coexist.bt_service = BT_IDLE;

			bt_state = bt_state |
			    ((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
			     0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
			    BIT_OFFSET_LEN_MASK_32(2, 1);
			rtl_write_byte(rtlpriv, 0x4fd, bt_state);
		}
		return true;
	}

	/* Fix: a zero polling counter would divide by zero below. */
	if (polling == 0)
		return false;

	ratio_tx = bt_tx * 1000 / polling;
	ratio_pri = bt_pri * 1000 / polling;
	rtlpcipriv->bt_coexist.ratio_tx = ratio_tx;
	rtlpcipriv->bt_coexist.ratio_pri = ratio_pri;

	if (bt_state && rtlpcipriv->bt_coexist.reg_bt_sco == 3) {
		/* Classify the BT service from traffic/priority ratios. */
		if ((ratio_tx < 30) && (ratio_pri < 30))
			cur_service_type = BT_IDLE;
		else if ((ratio_pri > 110) && (ratio_pri < 250))
			cur_service_type = BT_SCO;
		else if ((ratio_tx >= 200) && (ratio_pri >= 200))
			cur_service_type = BT_BUSY;
		else if ((ratio_tx >= 350) && (ratio_tx < 500))
			cur_service_type = BT_OTHERBUSY;
		else if (ratio_tx >= 500)
			cur_service_type = BT_PAN;
		else
			cur_service_type = BT_OTHER_ACTION;

		if (cur_service_type != rtlpcipriv->bt_coexist.bt_service) {
			rtlpcipriv->bt_coexist.bt_service = cur_service_type;
			bt_state = bt_state |
			    ((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
			     0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
			    ((rtlpcipriv->bt_coexist.bt_service != BT_IDLE) ?
			     0 : BIT_OFFSET_LEN_MASK_32(2, 1));

			/* Add interrupt migration when bt is not in
			 * idle state (no traffic). */
			if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
				rtl_write_word(rtlpriv, 0x504, 0x0ccc);
				rtl_write_byte(rtlpriv, 0x506, 0x54);
				rtl_write_byte(rtlpriv, 0x507, 0x54);
			} else {
				rtl_write_byte(rtlpriv, 0x506, 0x00);
				rtl_write_byte(rtlpriv, 0x507, 0x00);
			}

			rtl_write_byte(rtlpriv, 0x4fd, bt_state);
			return true;
		}
	}

	return false;
}
/*
 * Edge-detect the wifi association state: returns true exactly once,
 * on the first call after the link came up.
 */
static bool rtl92c_bt_wifi_connect_change(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	static bool media_connect;
	bool just_connected = false;

	if (rtlpriv->mac80211.link_state < MAC80211_LINKED) {
		media_connect = false;
	} else {
		just_connected = !media_connect;
		media_connect = true;
	}
	return just_connected;
}
/*
 * Choose the EDCA parameter pair for "normal power" BT coexistence
 * from the current BT service type, with a B/G-mode low-RSSI override
 * applied at the end.
 */
static void rtl92c_bt_set_normal(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);

	switch (pcipriv->bt_coexist.bt_service) {
	case BT_OTHERBUSY:
		pcipriv->bt_coexist.bt_edca_ul = 0x5ea72b;
		pcipriv->bt_coexist.bt_edca_dl = 0x5ea72b;
		break;
	case BT_BUSY:
		pcipriv->bt_coexist.bt_edca_ul = 0x5eb82f;
		pcipriv->bt_coexist.bt_edca_dl = 0x5eb82f;
		break;
	case BT_SCO:
		/* SCO parameters depend on how busy the BT TX side is. */
		if (pcipriv->bt_coexist.ratio_tx > 160) {
			pcipriv->bt_coexist.bt_edca_ul = 0x5ea72f;
			pcipriv->bt_coexist.bt_edca_dl = 0x5ea72f;
		} else {
			pcipriv->bt_coexist.bt_edca_ul = 0x5ea32b;
			pcipriv->bt_coexist.bt_edca_dl = 0x5ea42b;
		}
		break;
	default:
		pcipriv->bt_coexist.bt_edca_ul = 0;
		pcipriv->bt_coexist.bt_edca_dl = 0;
		break;
	}

	/* Low-RSSI override for active BT in pure B/G operation. */
	if ((pcipriv->bt_coexist.bt_service != BT_IDLE) &&
	    (rtlpriv->mac80211.mode == WIRELESS_MODE_G ||
	     (rtlpriv->mac80211.mode ==
	      (WIRELESS_MODE_G | WIRELESS_MODE_B))) &&
	    (pcipriv->bt_coexist.bt_rssi_state &
	     BT_RSSI_STATE_BG_EDCA_LOW)) {
		pcipriv->bt_coexist.bt_edca_ul = 0x5eb82b;
		pcipriv->bt_coexist.bt_edca_dl = 0x5eb82b;
	}
}
/*
 * Program antenna-sharing / GPIO coexistence settings according to the
 * current BT service type and RSSI flags, then adjust EDCA and TX
 * power accordingly.
 */
static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);

	/* Only enable HW BT coexist when BT in "Busy" state. */
	if (rtlpriv->mac80211.vendor == PEER_CISCO &&
	    rtlpcipriv->bt_coexist.bt_service == BT_OTHER_ACTION) {
		rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
	} else {
		if ((rtlpcipriv->bt_coexist.bt_service == BT_BUSY) &&
		    (rtlpcipriv->bt_coexist.bt_rssi_state &
		     BT_RSSI_STATE_NORMAL_POWER)) {
			rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
		} else if ((rtlpcipriv->bt_coexist.bt_service ==
			    BT_OTHER_ACTION) && (rtlpriv->mac80211.mode <
			    WIRELESS_MODE_N_24G) &&
			   (rtlpcipriv->bt_coexist.bt_rssi_state &
			    BT_RSSI_STATE_SPECIAL_LOW)) {
			rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
		} else if (rtlpcipriv->bt_coexist.bt_service == BT_PAN) {
			rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0x00);
		} else {
			rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0x00);
		}
	}

	/* GPIO pin control differs only for the PAN profile. */
	if (rtlpcipriv->bt_coexist.bt_service == BT_PAN)
		rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x10100);
	else
		rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x0);

	/* EDCA tuning only applies in the normal-power RSSI state. */
	if (rtlpcipriv->bt_coexist.bt_rssi_state &
	    BT_RSSI_STATE_NORMAL_POWER) {
		rtl92c_bt_set_normal(hw);
	} else {
		rtlpcipriv->bt_coexist.bt_edca_ul = 0;
		rtlpcipriv->bt_coexist.bt_edca_dl = 0;
	}

	/* RF reg 0x1e: force 0xf while BT is active, else restore the
	 * original value saved at init. */
	if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
		rtlpriv->cfg->ops->set_rfreg(hw,
					     RF90_PATH_A,
					     0x1e,
					     0xf0, 0xf);
	} else {
		rtlpriv->cfg->ops->set_rfreg(hw,
					     RF90_PATH_A, 0x1e, 0xf0,
					     rtlpcipriv->bt_coexist.bt_rfreg_origin_1e);
	}

	/* When dynamic TX power is off, pick a BT-specific level here. */
	if (!rtlpriv->dm.dynamic_txpower_enable) {
		if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
			if (rtlpcipriv->bt_coexist.bt_rssi_state &
			    BT_RSSI_STATE_TXPOWER_LOW) {
				rtlpriv->dm.dynamic_txhighpower_lvl =
				    TXHIGHPWRLEVEL_BT2;
			} else {
				rtlpriv->dm.dynamic_txhighpower_lvl =
				    TXHIGHPWRLEVEL_BT1;
			}
		} else {
			rtlpriv->dm.dynamic_txhighpower_lvl =
			    TXHIGHPWRLEVEL_NORMAL;
		}
		rtl92c_phy_set_txpower_level(hw,
					     rtlpriv->phy.current_channel);
	}
}
/*
 * React to a BT status change: when BT went away, restore the
 * wifi-only defaults; otherwise re-run the antenna-isolation
 * programming (if antenna isolation is configured).
 */
static void rtl92c_check_bt_change(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);

	if (!pcipriv->bt_coexist.bt_cur_state) {
		/* BT inactive: undo all coexistence tweaks. */
		rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0x00);
		rtlpriv->cfg->ops->set_rfreg(hw, RF90_PATH_A, 0x1e, 0xf0,
				pcipriv->bt_coexist.bt_rfreg_origin_1e);
		pcipriv->bt_coexist.bt_edca_ul = 0;
		pcipriv->bt_coexist.bt_edca_dl = 0;
		return;
	}

	if (pcipriv->bt_coexist.bt_ant_isolation)
		rtl92c_bt_ant_isolation(hw);
}
/*
 * BT coexistence watchdog step for CSR BC4-based designs: sample all
 * three change detectors (each must run every round to keep its state
 * fresh) and reprogram coexistence when any of them fired.
 */
void rtl92c_dm_bt_coexist(struct ieee80211_hw *hw)
{
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	bool changed;

	if (!pcipriv->bt_coexist.bt_coexistence ||
	    pcipriv->bt_coexist.bt_coexist_type != BT_CSR_BC4)
		return;

	changed = rtl92c_bt_wifi_connect_change(hw);
	changed |= rtl92c_bt_state_change(hw);
	changed |= rtl92c_bt_rssi_state_change(hw);
	if (changed)
		rtl92c_check_bt_change(hw);
}
EXPORT_SYMBOL(rtl92c_dm_bt_coexist);
| gpl-2.0 |
lujji/JXD-7800b-JB-kernel | drivers/staging/go7007/snd-go7007.c | 4190 | 8130 | /*
* Copyright (C) 2005-2006 Micronas USA Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/system.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/initval.h>
#include "go7007-priv.h"
/* Standard ALSA card index/id/enable module parameters. */
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;

module_param_array(index, int, NULL, 0444);
module_param_array(id, charp, NULL, 0444);
/* NOTE(review): 'enable' is declared int[] but exposed with the bool
 * param type - newer kernels reject that mismatch; confirm. */
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for the go7007 audio driver");
MODULE_PARM_DESC(id, "ID string for the go7007 audio driver");
MODULE_PARM_DESC(enable, "Enable for the go7007 audio driver");

/* Per-device audio state, hung off go7007->snd_context. */
struct go7007_snd {
	struct snd_card *card;
	struct snd_pcm *pcm;
	struct snd_pcm_substream *substream; /* active capture stream or NULL */
	spinlock_t lock;	/* guards hw_ptr/avail accounting */
	int w_idx;		/* write offset (bytes) into the dma_area ring */
	int hw_ptr;		/* hardware pointer in frames */
	int avail;		/* frames accumulated since the last period */
	int capturing;		/* nonzero once TRIGGER_START was seen */
};
/* Fixed capture format: 48 kHz stereo S16LE; up to 128 KiB of buffer
 * with a 4 KiB minimum period size. */
static struct snd_pcm_hardware go7007_snd_capture_hw = {
	.info = (SNDRV_PCM_INFO_MMAP |
		 SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_MMAP_VALID),
	.formats = SNDRV_PCM_FMTBIT_S16_LE,
	.rates = SNDRV_PCM_RATE_48000,
	.rate_min = 48000,
	.rate_max = 48000,
	.channels_min = 2,
	.channels_max = 2,
	.buffer_bytes_max = (128*1024),
	.period_bytes_min = 4096,
	.period_bytes_max = (128*1024),
	.periods_min = 1,
	.periods_max = 32,
};
/*
 * Deliver a chunk of encoder audio into the ALSA ring buffer and wake
 * ALSA once at least one full period has accumulated.
 * NOTE(review): gosnd->substream is dereferenced without a NULL check;
 * presumably this hook is only installed while a stream is open
 * (hw_params/hw_free) - confirm against the go7007 data path.
 */
static void parse_audio_stream_data(struct go7007 *go, u8 *buf, int length)
{
	struct go7007_snd *gosnd = go->snd_context;
	struct snd_pcm_runtime *runtime = gosnd->substream->runtime;
	int frames = bytes_to_frames(runtime, length);

	/* Advance the frame accounting under the lock... */
	spin_lock(&gosnd->lock);
	gosnd->hw_ptr += frames;
	if (gosnd->hw_ptr >= runtime->buffer_size)
		gosnd->hw_ptr -= runtime->buffer_size;
	gosnd->avail += frames;
	spin_unlock(&gosnd->lock);

	/* ...then copy the bytes (splitting at the ring wrap-around)
	 * outside of it. */
	if (gosnd->w_idx + length > runtime->dma_bytes) {
		int cpy = runtime->dma_bytes - gosnd->w_idx;

		memcpy(runtime->dma_area + gosnd->w_idx, buf, cpy);
		length -= cpy;
		buf += cpy;
		gosnd->w_idx = 0;
	}
	memcpy(runtime->dma_area + gosnd->w_idx, buf, length);
	gosnd->w_idx += length;

	spin_lock(&gosnd->lock);
	if (gosnd->avail < runtime->period_size) {
		spin_unlock(&gosnd->lock);
		return;
	}
	gosnd->avail -= runtime->period_size;
	spin_unlock(&gosnd->lock);

	/* Signal ALSA only while actually capturing. */
	if (gosnd->capturing)
		snd_pcm_period_elapsed(gosnd->substream);
}
/*
 * Allocate (or replace) the vmalloc-backed capture buffer and hook up
 * the audio delivery callback.  ALSA may call hw_params repeatedly,
 * so any previous buffer is freed first.  Returns 0 or -ENOMEM.
 */
static int go7007_snd_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *hw_params)
{
	struct go7007 *go = snd_pcm_substream_chip(substream);
	unsigned int bytes;

	bytes = params_buffer_bytes(hw_params);
	if (substream->runtime->dma_bytes > 0)
		vfree(substream->runtime->dma_area);
	/* Keep dma_bytes consistent even if the allocation fails. */
	substream->runtime->dma_bytes = 0;
	substream->runtime->dma_area = vmalloc(bytes);
	if (substream->runtime->dma_area == NULL)
		return -ENOMEM;
	substream->runtime->dma_bytes = bytes;
	go->audio_deliver = parse_audio_stream_data;
	return 0;
}
static int go7007_snd_hw_free(struct snd_pcm_substream *substream)
{
struct go7007 *go = snd_pcm_substream_chip(substream);
go->audio_deliver = NULL;
if (substream->runtime->dma_bytes > 0)
vfree(substream->runtime->dma_area);
substream->runtime->dma_bytes = 0;
return 0;
}
/*
 * Claim the single capture substream slot under the device lock.
 * Returns 0 on success, -EBUSY if a capture stream is already open.
 */
static int go7007_snd_capture_open(struct snd_pcm_substream *substream)
{
	struct go7007 *go = snd_pcm_substream_chip(substream);
	struct go7007_snd *gosnd = go->snd_context;
	unsigned long flags;
	int r = -EBUSY;

	spin_lock_irqsave(&gosnd->lock, flags);
	if (!gosnd->substream) {
		gosnd->substream = substream;
		substream->runtime->hw = go7007_snd_capture_hw;
		r = 0;
	}
	spin_unlock_irqrestore(&gosnd->lock, flags);
	return r;
}
/*
 * Release the (single) capture substream slot.
 * NOTE(review): unlike open, this clears gosnd->substream without
 * taking gosnd->lock; confirm ALSA core serialisation (or the
 * hw_free ordering) makes this safe against the delivery callback.
 */
static int go7007_snd_capture_close(struct snd_pcm_substream *substream)
{
	struct go7007 *go = snd_pcm_substream_chip(substream);
	struct go7007_snd *gosnd = go->snd_context;

	gosnd->substream = NULL;
	return 0;
}
/* Nothing to do at prepare time; the buffer is set up in hw_params. */
static int go7007_snd_pcm_prepare(struct snd_pcm_substream *substream)
{
	return 0;
}
/*
 * Start/stop capture.  Start only raises a flag so the delivery path
 * begins signalling ALSA; stop clears it and resets the ring state.
 */
static int go7007_snd_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct go7007 *go = snd_pcm_substream_chip(substream);
	struct go7007_snd *gosnd = go->snd_context;

	if (cmd == SNDRV_PCM_TRIGGER_START) {
		/* Just set a flag to indicate we should signal ALSA when
		 * sound comes in */
		gosnd->capturing = 1;
		return 0;
	}
	if (cmd == SNDRV_PCM_TRIGGER_STOP) {
		gosnd->capturing = 0;
		gosnd->hw_ptr = 0;
		gosnd->w_idx = 0;
		gosnd->avail = 0;
		return 0;
	}
	return -EINVAL;
}
/* Report the current hardware position (in frames) inside the ring. */
static snd_pcm_uframes_t go7007_snd_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct go7007 *go = snd_pcm_substream_chip(substream);
	struct go7007_snd *gosnd = go->snd_context;

	return gosnd->hw_ptr;
}
/* mmap support: map an offset in the vmalloc'ed ring to its page. */
static struct page *go7007_snd_pcm_page(struct snd_pcm_substream *substream,
					unsigned long offset)
{
	return vmalloc_to_page(substream->runtime->dma_area + offset);
}
/* ALSA capture callbacks; a custom .page handler is required because
 * the buffer lives in vmalloc space. */
static struct snd_pcm_ops go7007_snd_capture_ops = {
	.open = go7007_snd_capture_open,
	.close = go7007_snd_capture_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = go7007_snd_hw_params,
	.hw_free = go7007_snd_hw_free,
	.prepare = go7007_snd_pcm_prepare,
	.trigger = go7007_snd_pcm_trigger,
	.pointer = go7007_snd_pcm_pointer,
	.page = go7007_snd_pcm_page,
};
/*
 * snd_device dev_free callback: release the audio context and drop
 * the go7007 reference taken in go7007_snd_init (the whole device
 * struct is freed on the last reference).
 */
static int go7007_snd_free(struct snd_device *device)
{
	struct go7007 *go = device->device_data;

	kfree(go->snd_context);
	go->snd_context = NULL;
	if (--go->ref_count == 0)
		kfree(go);
	return 0;
}
/* Lowlevel snd_device ops; only dev_free is needed. */
static struct snd_device_ops go7007_snd_device_ops = {
	.dev_free = go7007_snd_free,
};
/*
 * Create and register the ALSA card for a go7007 device.
 *
 * On success the card/pcm handles are stored in go->snd_context and a
 * reference on the go7007 structure is taken; both are released via
 * go7007_snd_free() when the snd_device is destroyed.
 * Returns 0 on success or a negative errno.
 *
 * Fixes vs. the previous version:
 *  - the shortname copy was bounded by sizeof(card->driver) instead of
 *    sizeof(card->shortname) (copy/paste slip);
 *  - the snd_device_new() failure path leaked the just-created card.
 */
int go7007_snd_init(struct go7007 *go)
{
	static int dev;		/* next free ALSA card slot */
	struct go7007_snd *gosnd;
	int ret = 0;

	if (dev >= SNDRV_CARDS)
		return -ENODEV;
	if (!enable[dev]) {
		dev++;
		return -ENOENT;
	}
	gosnd = kmalloc(sizeof(struct go7007_snd), GFP_KERNEL);
	if (gosnd == NULL)
		return -ENOMEM;
	spin_lock_init(&gosnd->lock);
	gosnd->hw_ptr = gosnd->w_idx = gosnd->avail = 0;
	gosnd->capturing = 0;
	ret = snd_card_create(index[dev], id[dev], THIS_MODULE, 0,
			      &gosnd->card);
	if (ret < 0) {
		kfree(gosnd);
		return ret;
	}
	ret = snd_device_new(gosnd->card, SNDRV_DEV_LOWLEVEL, go,
			     &go7007_snd_device_ops);
	if (ret < 0) {
		/* Fix: also release the card created above (was leaked). */
		snd_card_free(gosnd->card);
		kfree(gosnd);
		return ret;
	}
	snd_card_set_dev(gosnd->card, go->dev);
	ret = snd_pcm_new(gosnd->card, "go7007", 0, 0, 1, &gosnd->pcm);
	if (ret < 0) {
		snd_card_free(gosnd->card);
		kfree(gosnd);
		return ret;
	}
	strncpy(gosnd->card->driver, "go7007", sizeof(gosnd->card->driver));
	/* Fix: bound by the destination (shortname), not by driver. */
	strncpy(gosnd->card->shortname, go->name,
		sizeof(gosnd->card->shortname));
	strncpy(gosnd->card->longname, gosnd->card->shortname,
		sizeof(gosnd->card->longname));
	gosnd->pcm->private_data = go;
	snd_pcm_set_ops(gosnd->pcm, SNDRV_PCM_STREAM_CAPTURE,
			&go7007_snd_capture_ops);
	ret = snd_card_register(gosnd->card);
	if (ret < 0) {
		snd_card_free(gosnd->card);
		kfree(gosnd);
		return ret;
	}
	gosnd->substream = NULL;
	go->snd_context = gosnd;
	++dev;
	++go->ref_count;
	return 0;
}
EXPORT_SYMBOL(go7007_snd_init);
/*
 * Detach the sound card.  snd_card_free_when_closed() defers the final
 * free (and hence go7007_snd_free) until userspace closes the device.
 */
int go7007_snd_remove(struct go7007 *go)
{
	struct go7007_snd *gosnd = go->snd_context;

	snd_card_disconnect(gosnd->card);
	snd_card_free_when_closed(gosnd->card);
	return 0;
}
EXPORT_SYMBOL(go7007_snd_remove);
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
LGaljo/android_kernel_samsung_s3ve3g | arch/powerpc/kernel/pci-common.c | 4446 | 49031 | /*
* Contains common pci routines for ALL ppc platform
* (based on pci_32.c and pci_64.c)
*
* Port for PPC64 David Engebretsen, IBM Corp.
* Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
*
* Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
* Rework, based on alpha PCI code.
*
* Common pmac/prep/chrp pci routines. -- Cort
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/eeh.h>
/* Protects hose_list and global_phb_number. */
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);

/* XXX kill that some day ... */
static int global_phb_number;		/* Global phb counter */

/* ISA Memory physical address */
resource_size_t isa_mem_base;

/* DMA ops installed on PCI devices; platforms override these via
 * set_pci_dma_ops(). */
static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
/* Install the DMA ops used for PCI devices. */
void set_pci_dma_ops(struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}

/* Return the DMA ops currently used for PCI devices. */
struct dma_map_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);
/*
 * Allocate and register a new PHB (PCI host bridge) descriptor.
 * Works before the slab allocator is up (bootmem fallback); the hose
 * list and the global PHB counter are protected by hose_spinlock.
 */
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (phb == NULL)
		return NULL;
	spin_lock(&hose_spinlock);
	phb->global_number = global_phb_number++;
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	/* Only controllers allocated after mem_init can be kfree'd. */
	phb->is_dynamic = mem_init_done;
#ifdef CONFIG_PPC64
	if (dev) {
		int nid = of_node_to_nid(dev);

		if (nid < 0 || !node_online(nid))
			nid = -1;

		PHB_SET_NODE(phb, nid);
	}
#endif
	return phb;
}
/*
 * Unregister a PHB; the structure itself is freed only when it was
 * dynamically allocated (see pcibios_alloc_controller).
 */
void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);
	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}
/*
 * Size of a hose's I/O window: ppc64 tracks it in pci_io_size, ppc32
 * derives it from the io_resource.
 */
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
	return hose->pci_io_size;
#else
	return resource_size(&hose->io_resource);
#endif
}
/*
 * Return nonzero when the given virtual address falls inside the I/O
 * window of any registered host bridge.
 */
int pcibios_vaddr_is_ioport(void __iomem *address)
{
	struct pci_controller *hose;
	int found = 0;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		resource_size_t size = pcibios_io_size(hose);

		if (address >= hose->io_base_virt &&
		    address < (hose->io_base_virt + size)) {
			found = 1;
			break;
		}
	}
	spin_unlock(&hose_spinlock);
	return found;
}
/*
 * Translate a physical I/O address into a PIO port number (offset
 * from _IO_BASE), or ~0 when no host bridge claims the address.
 */
unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose;
	unsigned long pio = ~0;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		resource_size_t size = pcibios_io_size(hose);

		if (address < hose->io_base_phys ||
		    address >= (hose->io_base_phys + size))
			continue;
		pio = (unsigned long)hose->io_base_virt - _IO_BASE +
		      (address - hose->io_base_phys);
		break;
	}
	spin_unlock(&hose_spinlock);
	return pio;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);
/*
 * Return the PCI domain number for this bus: the global number of the
 * host bridge that owns it.
 */
int pci_domain_nr(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose->global_number;
}
EXPORT_SYMBOL(pci_domain_nr);
/*
 * Find the host bridge owning the given OF node by matching the node
 * and each of its ancestors against the registered hose list.  Meant
 * for early boot, when PCI bus numbers have not yet been assigned and
 * config cycles must be issued to an OF device; it can also "fix"
 * RTAS config cycles with pci_assign_all_buses set to 1.
 */
struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
{
	struct pci_controller *hose, *tmp;

	for (; node != NULL; node = node->parent)
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
	return NULL;
}
/* sysfs "devspec" attribute: the OF path of the PCI device, if any. */
static ssize_t pci_show_devspec(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev;
	struct device_node *np;

	pdev = to_pci_dev (dev);
	np = pci_device_to_OF_node(pdev);
	if (np == NULL || np->full_name == NULL)
		return 0;
	return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
/* Add sysfs properties: expose the "devspec" attribute per device. */
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
	return device_create_file(&pdev->dev, &dev_attr_devspec);
}
/* Hook for PCI command-line options; this platform consumes none. */
char __devinit *pcibios_setup(char *str)
{
	return str;
}
/*
 * Reads the interrupt pin to determine if an interrupt is used by the
 * card.  If so, gets the interrupt line from the openfirmware tree
 * (or, failing that, from PCI config space) and sets it in the
 * pci_dev.  Returns 0 on success, -1 when no usable IRQ was found.
 */
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
	struct of_irq oirq;
	unsigned int virq;

	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));

#ifdef DEBUG
	memset(&oirq, 0xff, sizeof(oirq));
#endif
	/* Try to get a mapping from the device-tree */
	if (of_irq_map_pci(pci_dev, &oirq)) {
		u8 line, pin;

		/* If that fails, lets fallback to what is in the config
		 * space and map that through the default controller. We
		 * also set the type to level low since that's what PCI
		 * interrupts are. If your platform does differently, then
		 * either provide a proper interrupt tree or don't use this
		 * function.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			return -1;
		if (pin == 0)
			return -1;
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff || line == 0) {
			return -1;
		}
		pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
			 line, pin);
		virq = irq_create_mapping(NULL, line);
		if (virq != NO_IRQ)
			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	} else {
		pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
			 oirq.size, oirq.specifier[0], oirq.specifier[1],
			 oirq.controller ? oirq.controller->full_name :
			 "<default>");
		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
					     oirq.size);
	}
	if (virq == NO_IRQ) {
		pr_debug(" Failed to map !\n");
		return -1;
	}

	pr_debug(" Mapped to linux irq %d\n", virq);

	pci_dev->irq = virq;

	return 0;
}
/*
* Platform support for /proc/bus/pci/X/Y mmap()s,
* modelled on the sparc64 implementation by Dave Miller.
* -- paulus.
*/
/*
 * Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address for his device which he wishes
 * to mmap. They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns the matching resource on success, NULL if the offset does not
 * fall in any resource of the device.  *offset is updated in place to
 * the final physical address.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
                                               resource_size_t *offset,
                                               enum pci_mmap_state mmap_state)
{
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
        unsigned long io_offset = 0;
        int i, res_bit;

        if (hose == 0)
                return NULL;            /* should never happen */

        /* If memory, add on the PCI bridge address offset */
        if (mmap_state == pci_mmap_mem) {
#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
                *offset += hose->pci_mem_offset;
#endif
                res_bit = IORESOURCE_MEM;
        } else {
                /* IO resources are stored CPU-side; translate the bus
                 * offset into that view before matching below.
                 */
                io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
                *offset += io_offset;
                res_bit = IORESOURCE_IO;
        }

        /*
         * Check that the offset requested corresponds to one of the
         * resources of the device.
         */
        for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
                struct resource *rp = &dev->resource[i];
                int flags = rp->flags;

                /* treat ROM as memory (should be already) */
                if (i == PCI_ROM_RESOURCE)
                        flags |= IORESOURCE_MEM;

                /* Active and same type? */
                if ((flags & res_bit) == 0)
                        continue;

                /* In the range of this resource? */
                if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
                        continue;

                /* found it! construct the final physical address */
                if (mmap_state == pci_mmap_io)
                        *offset += hose->io_base_phys - io_offset;
                return rp;
        }

        return NULL;
}
/*
 * Compute the vm_page_prot to use for a PCI device mapping of resource
 * RP, given the user-supplied PROTECTION and WRITE_COMBINE request.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
                                      pgprot_t protection,
                                      enum pci_mmap_state mmap_state,
                                      int write_combine)
{
        unsigned long prot = pgprot_val(protection);

        /* Write combine is always 0 on non-memory space mappings. On
         * memory space, if the user didn't pass 1, we check for a
         * "prefetchable" resource. This is a bit hackish, but we use
         * this to workaround the inability of /sysfs to provide a write
         * combine bit
         */
        if (mmap_state != pci_mmap_mem)
                write_combine = 0;
        else if (!write_combine && (rp->flags & IORESOURCE_PREFETCH))
                write_combine = 1;

        /* XXX would be nice to have a way to ask for write-through */
        return write_combine ? pgprot_noncached_wc(prot)
                             : pgprot_noncached(prot);
}
/*
 * This one is used by /dev/mem and fbdev who have no clue about the
 * PCI device, it tries to find the PCI device first and calls the
 * above routine
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
                                  unsigned long pfn,
                                  unsigned long size,
                                  pgprot_t prot)
{
        struct pci_dev *pdev = NULL;
        struct resource *found = NULL;
        resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
        int i;

        /* System RAM keeps its normal (cacheable) protection */
        if (page_is_ram(pfn))
                return prot;

        prot = pgprot_noncached(prot);

        /* Search all PCI devices for a memory resource containing the
         * target physical address.
         */
        for_each_pci_dev(pdev) {
                for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
                        struct resource *rp = &pdev->resource[i];
                        int flags = rp->flags;

                        /* Active and same type? */
                        if ((flags & IORESOURCE_MEM) == 0)
                                continue;
                        /* In the range of this resource? */
                        if (offset < (rp->start & PAGE_MASK) ||
                            offset > rp->end)
                                continue;
                        found = rp;
                        break;
                }
                if (found)
                        break;
        }
        if (found) {
                /* Prefetchable PCI memory may be mapped write-combined */
                if (found->flags & IORESOURCE_PREFETCH)
                        prot = pgprot_noncached_wc(prot);
                /* Drop the reference for_each_pci_dev() left held on
                 * pdev when we broke out of the loop early.
                 */
                pci_dev_put(pdev);
        }

        pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
                 (unsigned long long)offset, pgprot_val(prot));

        return prot;
}
/*
* Perform the actual remap of the pages for a PCI device mapping, as
* appropriate for this architecture. The region in the process to map
* is described by vm_start and vm_end members of VMA, the base physical
* address is found in vm_pgoff.
* The pci device structure is provided so that architectures may make mapping
* decisions on a per-device or per-bus basis.
*
* Returns a negative error code on failure, zero on success.
*/
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
                        enum pci_mmap_state mmap_state, int write_combine)
{
        resource_size_t offset =
                ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
        struct resource *rp;

        /* Validate the requested offset against the device's resources
         * and translate it into a final physical address.
         */
        rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
        if (!rp)
                return -EINVAL;

        vma->vm_pgoff = offset >> PAGE_SHIFT;
        vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp, vma->vm_page_prot,
                                                  mmap_state, write_combine);

        return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
/* This provides legacy IO read access on a bus.
 * Returns the number of bytes read on success, -ENXIO if the port is
 * outside the host bridge's IO range, -EINVAL on bad alignment/size.
 */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
        unsigned long offset;
        struct pci_controller *hose = pci_bus_to_host(bus);
        struct resource *rp = &hose->io_resource;
        void __iomem *addr;

        /* Check if port can be supported by that bus. We only check
         * the ranges of the PHB though, not the bus itself as the rules
         * for forwarding legacy cycles down bridges are not our problem
         * here. So if the host bridge supports it, we do it.
         */
        offset = (unsigned long)hose->io_base_virt - _IO_BASE;
        offset += port;

        if (!(rp->flags & IORESOURCE_IO))
                return -ENXIO;
        if (offset < rp->start || (offset + size) > rp->end)
                return -ENXIO;
        addr = hose->io_base_virt + port;

        /* Legacy PCI IO is little-endian; enforce natural alignment */
        switch(size) {
        case 1:
                *((u8 *)val) = in_8(addr);
                return 1;
        case 2:
                if (port & 1)
                        return -EINVAL;
                *((u16 *)val) = in_le16(addr);
                return 2;
        case 4:
                if (port & 3)
                        return -EINVAL;
                *((u32 *)val) = in_le32(addr);
                return 4;
        }
        return -EINVAL;
}
/* This provides legacy IO write access on a bus.
 * Returns the number of bytes written on success, -ENXIO if the port is
 * outside the host bridge's IO range, -EINVAL on bad alignment/size.
 */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
        unsigned long offset;
        struct pci_controller *hose = pci_bus_to_host(bus);
        struct resource *rp = &hose->io_resource;
        void __iomem *addr;

        /* Check if port can be supported by that bus. We only check
         * the ranges of the PHB though, not the bus itself as the rules
         * for forwarding legacy cycles down bridges are not our problem
         * here. So if the host bridge supports it, we do it.
         */
        offset = (unsigned long)hose->io_base_virt - _IO_BASE;
        offset += port;

        if (!(rp->flags & IORESOURCE_IO))
                return -ENXIO;
        if (offset < rp->start || (offset + size) > rp->end)
                return -ENXIO;
        addr = hose->io_base_virt + port;

        /* WARNING: The generic code is idiotic. It gets passed a pointer
         * to what can be a 1, 2 or 4 byte quantity and always reads that
         * as a u32, which means that we have to correct the location of
         * the data read within those 32 bits for size 1 and 2
         */
        switch(size) {
        case 1:
                /* Value lives in the top byte of the u32 (see above) */
                out_8(addr, val >> 24);
                return 1;
        case 2:
                if (port & 1)
                        return -EINVAL;
                /* Value lives in the top half of the u32 (see above) */
                out_le16(addr, val >> 16);
                return 2;
        case 4:
                if (port & 3)
                        return -EINVAL;
                out_le32(addr, val);
                return 4;
        }
        return -EINVAL;
}
/* This provides legacy IO or memory mmap access on a bus.
 * Returns 0 on success or a negative error code.
 */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
                               struct vm_area_struct *vma,
                               enum pci_mmap_state mmap_state)
{
        struct pci_controller *hose = pci_bus_to_host(bus);
        resource_size_t offset =
                ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
        resource_size_t size = vma->vm_end - vma->vm_start;
        struct resource *rp;

        pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
                 pci_domain_nr(bus), bus->number,
                 mmap_state == pci_mmap_mem ? "MEM" : "IO",
                 (unsigned long long)offset,
                 (unsigned long long)(offset + size - 1));

        if (mmap_state == pci_mmap_mem) {
                /* Hack alert !
                 *
                 * Because X is lame and can fail starting if it gets an error trying
                 * to mmap legacy_mem (instead of just moving on without legacy memory
                 * access) we fake it here by giving it anonymous memory, effectively
                 * behaving just like /dev/zero
                 */
                if ((offset + size) > hose->isa_mem_size) {
                        printk(KERN_DEBUG
                               "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
                               current->comm, current->pid, pci_domain_nr(bus), bus->number);
                        if (vma->vm_flags & VM_SHARED)
                                return shmem_zero_setup(vma);
                        return 0;
                }
                offset += hose->isa_mem_phys;
        } else {
                /* Validate against the host bridge's IO window, which is
                 * kept in CPU-relative terms (hence the io_offset shift).
                 */
                unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
                unsigned long roffset = offset + io_offset;
                rp = &hose->io_resource;
                if (!(rp->flags & IORESOURCE_IO))
                        return -ENXIO;
                if (roffset < rp->start || (roffset + size) > rp->end)
                        return -ENXIO;
                offset += hose->io_base_phys;
        }
        pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

        vma->vm_pgoff = offset >> PAGE_SHIFT;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
/* Convert a kernel-internal resource of DEV back into the values to be
 * shown to userland (sysfs/procfs).  IO resources are converted back
 * from the CPU-relative form they are stored in.
 */
void pci_resource_to_user(const struct pci_dev *dev, int bar,
                          const struct resource *rsrc,
                          resource_size_t *start, resource_size_t *end)
{
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
        resource_size_t offset = 0;

        if (hose == NULL)
                return;

        if (rsrc->flags & IORESOURCE_IO)
                offset = (unsigned long)hose->io_base_virt - _IO_BASE;

        /* We pass a fully fixed up address to userland for MMIO instead of
         * a BAR value because X is lame and expects to be able to use that
         * to pass to /dev/mem !
         *
         * That means that we'll have potentially 64 bits values where some
         * userland apps only expect 32 (like X itself since it thinks only
         * Sparc has 64 bits MMIO) but if we don't do that, we break it on
         * 32 bits CHRPs :-(
         *
         * Hopefully, the sysfs insterface is immune to that gunk. Once X
         * has been fixed (and the fix spread enough), we can re-enable the
         * 2 lines below and pass down a BAR value to userland. In that case
         * we'll also have to re-enable the matching code in
         * __pci_mmap_make_offset().
         *
         * BenH.
         */
#if 0
        else if (rsrc->flags & IORESOURCE_MEM)
                offset = hose->pci_mem_offset;
#endif

        *start = rsrc->start - offset;
        *end = rsrc->end - offset;
}
/**
 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
 * @hose: newly allocated pci_controller to be setup
 * @dev: device node of the host bridge
 * @primary: set if primary bus (32 bits only, soon to be deprecated)
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and setup the resource mapping of a pci controller based on its
 * content.
 *
 * Life would be boring if it wasn't for a few issues that we have to deal
 * with here:
 *
 *   - We can only cope with one IO space range and up to 3 Memory space
 *     ranges. However, some machines (thanks Apple !) tend to split their
 *     space into lots of small contiguous ranges. So we have to coalesce.
 *
 *   - We can only cope with all memory ranges having the same offset
 *     between CPU addresses and PCI addresses. Unfortunately, some bridges
 *     are setup for a large 1:1 mapping along with a small "window" which
 *     maps PCI address 0 to some arbitrary high address of the CPU space in
 *     order to give access to the ISA memory hole.
 *     The way out of here that I've chosen for now is to always set the
 *     offset based on the first resource found, then override it if we
 *     have a different offset and the previous was set by an ISA hole.
 *
 *   - Some busses have IO space not starting at 0, which causes trouble with
 *     the way we do our IO resource renumbering. The code somewhat deals with
 *     it for 64 bits but I would expect problems on 32 bits.
 *
 *   - Some 32 bits platforms such as 4xx can have physical space larger than
 *     32 bits so we need to use 64 bits values for the parsing
 */
void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
                                            struct device_node *dev,
                                            int primary)
{
        const u32 *ranges;
        int rlen;
        int pna = of_n_addr_cells(dev);         /* parent #address-cells */
        int np = pna + 5;                       /* cells per "ranges" entry */
        int memno = 0, isa_hole = -1;
        u32 pci_space;
        unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
        unsigned long long isa_mb = 0;
        struct resource *res;

        printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
               dev->full_name, primary ? "(primary)" : "");

        /* Get ranges property */
        ranges = of_get_property(dev, "ranges", &rlen);
        if (ranges == NULL)
                return;

        /* Parse it */
        while ((rlen -= np * 4) >= 0) {
                /* Read next ranges element */
                pci_space = ranges[0];
                pci_addr = of_read_number(ranges + 1, 2);
                cpu_addr = of_translate_address(dev, ranges + 3);
                size = of_read_number(ranges + pna + 3, 2);
                ranges += np;

                /* If we failed translation or got a zero-sized region
                 * (some FW try to feed us with non sensical zero sized regions
                 * such as power3 which look like some kind of attempt at exposing
                 * the VGA memory hole)
                 */
                if (cpu_addr == OF_BAD_ADDR || size == 0)
                        continue;

                /* Now consume following elements while they are contiguous */
                for (; rlen >= np * sizeof(u32);
                     ranges += np, rlen -= np * 4) {
                        if (ranges[0] != pci_space)
                                break;
                        pci_next = of_read_number(ranges + 1, 2);
                        cpu_next = of_translate_address(dev, ranges + 3);
                        /* Must be contiguous on both the PCI and CPU side
                         * to be coalesced into the current range.
                         */
                        if (pci_next != pci_addr + size ||
                            cpu_next != cpu_addr + size)
                                break;
                        size += of_read_number(ranges + pna + 3, 2);
                }

                /* Act based on address space type */
                res = NULL;
                switch ((pci_space >> 24) & 0x3) {
                case 1:         /* PCI IO space */
                        printk(KERN_INFO
                               "  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
                               cpu_addr, cpu_addr + size - 1, pci_addr);

                        /* We support only one IO range */
                        if (hose->pci_io_size) {
                                printk(KERN_INFO
                                       " \\--> Skipped (too many) !\n");
                                continue;
                        }
#ifdef CONFIG_PPC32
                        /* On 32 bits, limit I/O space to 16MB */
                        if (size > 0x01000000)
                                size = 0x01000000;

                        /* 32 bits needs to map IOs here */
                        hose->io_base_virt = ioremap(cpu_addr, size);

                        /* Expect trouble if pci_addr is not 0 */
                        if (primary)
                                isa_io_base =
                                        (unsigned long)hose->io_base_virt;
#endif /* CONFIG_PPC32 */
                        /* pci_io_size and io_base_phys always represent IO
                         * space starting at 0 so we factor in pci_addr
                         */
                        hose->pci_io_size = pci_addr + size;
                        hose->io_base_phys = cpu_addr - pci_addr;

                        /* Build resource */
                        res = &hose->io_resource;
                        res->flags = IORESOURCE_IO;
                        res->start = pci_addr;
                        break;
                case 2:         /* PCI Memory space */
                case 3:         /* PCI 64 bits Memory space */
                        printk(KERN_INFO
                               " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
                               cpu_addr, cpu_addr + size - 1, pci_addr,
                               (pci_space & 0x40000000) ? "Prefetch" : "");

                        /* We support only 3 memory ranges */
                        if (memno >= 3) {
                                printk(KERN_INFO
                                       " \\--> Skipped (too many) !\n");
                                continue;
                        }
                        /* Handles ISA memory hole space here */
                        if (pci_addr == 0) {
                                isa_mb = cpu_addr;
                                isa_hole = memno;
                                if (primary || isa_mem_base == 0)
                                        isa_mem_base = cpu_addr;
                                hose->isa_mem_phys = cpu_addr;
                                hose->isa_mem_size = size;
                        }

                        /* We get the PCI/Mem offset from the first range or
                         * the, current one if the offset came from an ISA
                         * hole. If they don't match, bugger.
                         */
                        if (memno == 0 ||
                            (isa_hole >= 0 && pci_addr != 0 &&
                             hose->pci_mem_offset == isa_mb))
                                hose->pci_mem_offset = cpu_addr - pci_addr;
                        else if (pci_addr != 0 &&
                                 hose->pci_mem_offset != cpu_addr - pci_addr) {
                                printk(KERN_INFO
                                       " \\--> Skipped (offset mismatch) !\n");
                                continue;
                        }

                        /* Build resource */
                        res = &hose->mem_resources[memno++];
                        res->flags = IORESOURCE_MEM;
                        if (pci_space & 0x40000000)
                                res->flags |= IORESOURCE_PREFETCH;
                        res->start = cpu_addr;
                        break;
                }
                /* Finish off the common fields of whichever resource was
                 * built above (IO or one of the memory ranges).
                 */
                if (res != NULL) {
                        res->name = dev->full_name;
                        res->end = res->start + size - 1;
                        res->parent = NULL;
                        res->sibling = NULL;
                        res->child = NULL;
                }
        }

        /* If there's an ISA hole and the pci_mem_offset is -not- matching
         * the ISA hole offset, then we need to remove the ISA hole from
         * the resource list for that brige
         */
        if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
                unsigned int next = isa_hole + 1;
                printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb);
                if (next < memno)
                        memmove(&hose->mem_resources[isa_hole],
                                &hose->mem_resources[next],
                                sizeof(struct resource) * (memno - next));
                hose->mem_resources[--memno].flags = 0;
        }
}
/* Decide whether to display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
        struct pci_controller *hose;

        if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS))
                return 0;

        /* In compat mode, domain 0 is hidden, others are shown */
        hose = pci_bus_to_host(bus);
        if (pci_has_flag(PCI_COMPAT_DOMAIN_0))
                return hose->global_number != 0;

        return 1;
}
/* This header fixup will do the resource fixup for all devices as they are
 * probed, but not for bridge ranges
 */
static void __devinit pcibios_fixup_resources(struct pci_dev *dev)
{
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
        int i;

        if (!hose) {
                printk(KERN_ERR "No host bridge for PCI dev %s !\n",
                       pci_name(dev));
                return;
        }
        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
                struct resource *res = dev->resource + i;
                if (!res->flags)
                        continue;

                /* If we're going to re-assign everything, we mark all resources
                 * as unset (and 0-base them). In addition, we mark BARs starting
                 * at 0 as unset as well, except if PCI_PROBE_ONLY is also set
                 * since in that case, we don't want to re-assign anything
                 */
                if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
                    (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
                        /* Only print message if not re-assigning */
                        if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
                                pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "
                                         "is unassigned\n",
                                         pci_name(dev), i,
                                         (unsigned long long)res->start,
                                         (unsigned long long)res->end,
                                         (unsigned int)res->flags);
                        /* 0-base and flag the BAR for later re-assignment */
                        res->end -= res->start;
                        res->start = 0;
                        res->flags |= IORESOURCE_UNSET;
                        continue;
                }

                pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
                         pci_name(dev), i,
                         (unsigned long long)res->start,
                         (unsigned long long)res->end,
                         (unsigned int)res->flags);
        }

        /* Call machine specific resource fixup */
        if (ppc_md.pcibios_fixup_resources)
                ppc_md.pcibios_fixup_resources(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
/* This function tries to figure out if a bridge resource has been initialized
 * by the firmware or not. It doesn't have to be absolutely bullet proof, but
 * things go more smoothly when it gets it right. It should covers cases such
 * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges
 *
 * Returns 1 if the resource looks uninitialized, 0 otherwise.
 */
static int __devinit pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
                                                           struct resource *res)
{
        struct pci_controller *hose = pci_bus_to_host(bus);
        struct pci_dev *dev = bus->self;
        resource_size_t offset;
        u16 command;
        int i;

        /* We don't do anything if PCI_PROBE_ONLY is set */
        if (pci_has_flag(PCI_PROBE_ONLY))
                return 0;

        /* Job is a bit different between memory and IO */
        if (res->flags & IORESOURCE_MEM) {
                /* If the BAR is non-0 (res != pci_mem_offset) then it's probably been
                 * initialized by somebody
                 */
                if (res->start != hose->pci_mem_offset)
                        return 0;

                /* The BAR is 0, let's check if memory decoding is enabled on
                 * the bridge. If not, we consider it unassigned
                 */
                pci_read_config_word(dev, PCI_COMMAND, &command);
                if ((command & PCI_COMMAND_MEMORY) == 0)
                        return 1;

                /* Memory decoding is enabled and the BAR is 0. If any of the bridge
                 * resources covers that starting address (0 then it's good enough for
                 * us for memory
                 */
                for (i = 0; i < 3; i++) {
                        if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
                            hose->mem_resources[i].start == hose->pci_mem_offset)
                                return 0;
                }

                /* Well, it starts at 0 and we know it will collide so we may as
                 * well consider it as unassigned. That covers the Apple case.
                 */
                return 1;
        } else {
                /* If the BAR is non-0, then we consider it assigned */
                offset = (unsigned long)hose->io_base_virt - _IO_BASE;
                if (((res->start - offset) & 0xfffffffful) != 0)
                        return 0;

                /* Here, we are a bit different than memory as typically IO space
                 * starting at low addresses -is- valid. What we do instead if that
                 * we consider as unassigned anything that doesn't have IO enabled
                 * in the PCI command register, and that's it.
                 */
                pci_read_config_word(dev, PCI_COMMAND, &command);
                if (command & PCI_COMMAND_IO)
                        return 0;

                /* It's starting at 0 and IO is disabled in the bridge, consider
                 * it unassigned
                 */
                return 1;
        }
}
/* Fixup resources of a PCI<->PCI bridge */
static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
{
        struct resource *res;
        int i;

        struct pci_dev *dev = bus->self;

        pci_bus_for_each_resource(bus, res, i) {
                if (!res || !res->flags)
                        continue;
                /* Transparent bridges forward everything beyond the first
                 * three windows; skip those synthetic resources.
                 */
                if (i >= 3 && bus->self->transparent)
                        continue;

                /* If we are going to re-assign everything, mark the resource
                 * as unset and move it down to 0
                 */
                if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
                        res->flags |= IORESOURCE_UNSET;
                        res->end -= res->start;
                        res->start = 0;
                        continue;
                }

                pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x]\n",
                         pci_name(dev), i,
                         (unsigned long long)res->start,
                         (unsigned long long)res->end,
                         (unsigned int)res->flags);

                /* Try to detect uninitialized P2P bridge resources,
                 * and clear them out so they get re-assigned later
                 */
                if (pcibios_uninitialized_bridge_resource(bus, res)) {
                        res->flags = 0;
                        pr_debug("PCI:%s            (unassigned)\n", pci_name(dev));
                }
        }
}
/* Per-bus fixups run after a bus is scanned: bridge windows, platform
 * quirks and bus-level DMA setup.
 */
void __devinit pcibios_setup_bus_self(struct pci_bus *bus)
{
        /* Fix up the bus resources for P2P bridges */
        if (bus->self)
                pcibios_fixup_bridge(bus);

        /* Platform specific bus fixups. This is currently only used
         * by fsl_pci and I'm hoping to get rid of it at some point
         */
        if (ppc_md.pcibios_fixup_bus)
                ppc_md.pcibios_fixup_bus(bus);

        /* Setup bus DMA mappings */
        if (ppc_md.pci_dma_bus_setup)
                ppc_md.pci_dma_bus_setup(bus);
}
/* Per-device fixups run after a bus is scanned: NUMA node, DMA ops and
 * interrupt line for every not-yet-configured device on the bus.
 */
void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
{
        struct pci_dev *dev;

        pr_debug("PCI: Fixup bus devices %d (%s)\n",
                 bus->number, bus->self ? pci_name(bus->self) : "PHB");

        list_for_each_entry(dev, &bus->devices, bus_list) {
                /* Cardbus can call us to add new devices to a bus, so ignore
                 * those who are already fully discovered
                 */
                if (dev->is_added)
                        continue;

                /* Fixup NUMA node as it may not be setup yet by the generic
                 * code and is needed by the DMA init
                 */
                set_dev_node(&dev->dev, pcibus_to_node(dev->bus));

                /* Hook up default DMA ops */
                set_dma_ops(&dev->dev, pci_dma_ops);
                set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);

                /* Additional platform DMA/iommu setup */
                if (ppc_md.pci_dma_dev_setup)
                        ppc_md.pci_dma_dev_setup(dev);

                /* Read default IRQs and fixup if necessary */
                pci_read_irq_line(dev);
                if (ppc_md.pci_irq_fixup)
                        ppc_md.pci_irq_fixup(dev);
        }
}
/* Architecture hook called when bus mastering is enabled for a device.
 * powerpc requires no additional setup here.
 */
void pcibios_set_master(struct pci_dev *dev)
{
        /* No special bus mastering setup handling */
}
void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
        /* When called from the generic PCI probe, read PCI<->PCI bridge
         * bases. This is -not- called when generating the PCI tree from
         * the OF device-tree.
         */
        if (bus->self)
                pci_read_bridge_bases(bus);

        /* Fix up the bus itself, then the devices on it */
        pcibios_setup_bus_self(bus);
        pcibios_setup_bus_devices(bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);
/* Called for cardbus buses: run the same per-device fixups a normal
 * bus scan would perform.
 */
void __devinit pci_fixup_cardbus(struct pci_bus *bus)
{
        pcibios_setup_bus_devices(bus);
}
/* Non-zero when DEV may skip the ISA 1K I/O alignment rule: the
 * platform allows it and no ISA device sits behind this bridge.
 */
static int skip_isa_ioresource_align(struct pci_dev *dev)
{
        return pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
                !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA);
}
/*
* We need to avoid collisions with `mirrored' VGA ports
* and other strange ISA hardware, so we always want the
* addresses to be allocated in the 0x000-0x0ff region
* modulo 0x400.
*
* Why? Because some silly external IO cards only decode
* the low 10 bits of the IO address. The 0x00-0xff region
* is reserved for motherboard devices that decode all 16
* bits, so it's ok to allocate at, say, 0x2800-0x28ff,
* but we want to try to avoid allocating at 0x2900-0x2bff
* which might have be mirrored at 0x0100-0x03ff..
*/
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
                                resource_size_t size, resource_size_t align)
{
        resource_size_t start = res->start;

        if (res->flags & IORESOURCE_IO) {
                struct pci_dev *dev = data;

                /* Round up into the next 1K block unless the start is
                 * already inside the safe 0x000-0x0ff region (modulo
                 * 0x400) or this device is exempt from ISA alignment.
                 */
                if (!skip_isa_ioresource_align(dev) && (start & 0x300))
                        start = (start + 0x3ff) & ~0x3ff;
        }

        return start;
}
EXPORT_SYMBOL(pcibios_align_resource);
/*
 * Reparent resource children of pr that conflict with res
 * under res, and make res replace those children.
 *
 * Returns 0 on success, -1 if a conflicting child is not fully
 * contained in res or no conflicting children were found.
 */
static int reparent_resources(struct resource *parent,
                              struct resource *res)
{
        struct resource *p, **pp;
        struct resource **firstpp = NULL;

        /* Scan the sibling list for the span of children overlapping res;
         * firstpp remembers the link pointing at the first such child.
         */
        for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
                if (p->end < res->start)
                        continue;
                if (res->end < p->start)
                        break;
                if (p->start < res->start || p->end > res->end)
                        return -1;      /* not completely contained */
                if (firstpp == NULL)
                        firstpp = pp;
        }
        if (firstpp == NULL)
                return -1;      /* didn't find any conflicting entries? */

        /* Splice res into parent's child list in place of the overlapping
         * children, which become res's own children.  *pp is the first
         * sibling past the overlapping span; it becomes res's sibling,
         * and the span itself is terminated with NULL.
         */
        res->parent = parent;
        res->child = *firstpp;
        res->sibling = *pp;
        *firstpp = res;
        *pp = NULL;
        for (p = res->child; p != NULL; p = p->sibling) {
                p->parent = res;
                pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
                         p->name,
                         (unsigned long long)p->start,
                         (unsigned long long)p->end, res->name);
        }
        return 0;
}
/*
* Handle resources of PCI devices. If the world were perfect, we could
* just allocate all the resource regions and do nothing more. It isn't.
* On the other hand, we cannot just re-allocate all devices, as it would
* require us to know lots of host bridge internals. So we attempt to
* keep as much of the original configuration as possible, but tweak it
* when it's found to be wrong.
*
* Known BIOS problems we have to work around:
* - I/O or memory regions not configured
* - regions configured, but not enabled in the command register
* - bogus I/O addresses above 64K used
* - expansion ROMs left enabled (this may sound harmless, but given
* the fact the PCI specs explicitly allow address decoders to be
* shared between expansion ROMs and other resource regions, it's
* at least dangerous)
*
* Our solution:
* (1) Allocate resources for all buses behind PCI-to-PCI bridges.
* This gives us fixed barriers on where we can allocate.
* (2) Allocate resources for all enabled devices. If there is
* a collision, just mark the resource as unallocated. Also
* disable expansion ROMs during this step.
* (3) Try to allocate resources for disabled devices. If the
* resources were assigned correctly, everything goes well,
* if they weren't, they won't disturb allocation of other
* resources.
* (4) Assign new addresses to resources which were either
* not configured at all or misconfigured. If explicitly
* requested by the user, configure expansion ROM address
* as well.
*/
/* Recursively request the bridge window resources of BUS and all child
 * buses from their parent resources, clearing out windows that cannot
 * be allocated so they get re-assigned later.
 */
void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
        struct pci_bus *b;
        int i;
        struct resource *res, *pr;

        pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
                 pci_domain_nr(bus), bus->number);

        pci_bus_for_each_resource(bus, res, i) {
                if (!res || !res->flags || res->start > res->end || res->parent)
                        continue;

                /* If the resource was left unset at this point, we clear it */
                if (res->flags & IORESOURCE_UNSET)
                        goto clear_resource;

                /* Root buses allocate from the global IO/memory trees,
                 * child buses from their bridge's windows.
                 */
                if (bus->parent == NULL)
                        pr = (res->flags & IORESOURCE_IO) ?
                                &ioport_resource : &iomem_resource;
                else {
                        pr = pci_find_parent_resource(bus->self, res);
                        if (pr == res) {
                                /* this happens when the generic PCI
                                 * code (wrongly) decides that this
                                 * bridge is transparent -- paulus
                                 */
                                continue;
                        }
                }

                pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
                         "[0x%x], parent %p (%s)\n",
                         bus->self ? pci_name(bus->self) : "PHB",
                         bus->number, i,
                         (unsigned long long)res->start,
                         (unsigned long long)res->end,
                         (unsigned int)res->flags,
                         pr, (pr && pr->name) ? pr->name : "nil");

                if (pr && !(pr->flags & IORESOURCE_UNSET)) {
                        if (request_resource(pr, res) == 0)
                                continue;
                        /*
                         * Must be a conflict with an existing entry.
                         * Move that entry (or entries) under the
                         * bridge resource and try again.
                         */
                        if (reparent_resources(pr, res) == 0)
                                continue;
                }
                pr_warning("PCI: Cannot allocate resource region "
                           "%d of PCI bridge %d, will remap\n", i, bus->number);
        clear_resource:
                /* Give up on this window; it will be re-assigned later */
                res->start = res->end = 0;
                res->flags = 0;
        }

        list_for_each_entry(b, &bus->children, node)
                pcibios_allocate_bus_resources(b);
}
/* Try to claim resource IDX of DEV from its parent resource; on failure
 * mark it unset and 0-base it so it gets a fresh assignment later.
 */
static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
{
        struct resource *pr, *r = &dev->resource[idx];

        pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
                 pci_name(dev), idx,
                 (unsigned long long)r->start,
                 (unsigned long long)r->end,
                 (unsigned int)r->flags);

        pr = pci_find_parent_resource(dev, r);
        if (!pr || (pr->flags & IORESOURCE_UNSET) ||
            request_resource(pr, r) < 0) {
                printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
                       " of device %s, will remap\n", idx, pci_name(dev));
                if (pr)
                        pr_debug("PCI:  parent is %p: %016llx-%016llx [%x]\n",
                                 pr,
                                 (unsigned long long)pr->start,
                                 (unsigned long long)pr->end,
                                 (unsigned int)pr->flags);
                /* We'll assign a new address later */
                r->flags |= IORESOURCE_UNSET;
                r->end -= r->start;
                r->start = 0;
        }
}
/* Allocate (claim from the resource tree) the resources of every PCI
 * device.  Called twice from pcibios_resource_survey(): pass 0 handles
 * resources whose decode is enabled in the command register, pass 1
 * handles disabled devices and expansion ROMs.  Pass 0 also switches
 * off any ROM BAR the firmware left enabled.
 *
 * Fixes vs. previous version:
 *  - "disabled = 1" for PCI_ROM_RESOURCE was dead code, immediately
 *    overwritten by the unconditional command-register test that
 *    followed, contradicting the comment's stated intent; it is now an
 *    else-if chain so ROMs really are allocated on pass 1 only.
 *  - the mangled "&reg;" argument to pci_read_config_dword is restored
 *    to "&reg".
 */
static void __init pcibios_allocate_resources(int pass)
{
        struct pci_dev *dev = NULL;
        int idx, disabled;
        u16 command;
        struct resource *r;

        for_each_pci_dev(dev) {
                pci_read_config_word(dev, PCI_COMMAND, &command);
                for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
                        r = &dev->resource[idx];
                        if (r->parent)          /* Already allocated */
                                continue;
                        if (!r->flags || (r->flags & IORESOURCE_UNSET))
                                continue;       /* Not assigned at all */
                        /* We only allocate ROMs on pass 1 just in case they
                         * have been screwed up by firmware
                         */
                        if (idx == PCI_ROM_RESOURCE)
                                disabled = 1;
                        else if (r->flags & IORESOURCE_IO)
                                disabled = !(command & PCI_COMMAND_IO);
                        else
                                disabled = !(command & PCI_COMMAND_MEMORY);
                        if (pass == disabled)
                                alloc_resource(dev, idx);
                }
                if (pass)
                        continue;
                r = &dev->resource[PCI_ROM_RESOURCE];
                if (r->flags) {
                        /* Turn the ROM off, leave the resource region,
                         * but keep it unregistered.
                         */
                        u32 reg;
                        pci_read_config_dword(dev, dev->rom_base_reg, &reg);
                        if (reg & PCI_ROM_ADDRESS_ENABLE) {
                                pr_debug("PCI: Switching off ROM of %s\n",
                                         pci_name(dev));
                                r->flags &= ~IORESOURCE_ROM_ENABLE;
                                pci_write_config_dword(dev, dev->rom_base_reg,
                                                       reg & ~PCI_ROM_ADDRESS_ENABLE);
                        }
                }
        }
}
/* Reserve the legacy IO port range (0x000-0xfff) and the VGA memory
 * window (0xa0000-0xbffff) of a host bridge, if they fall inside its
 * available resources, so later assignment does not allocate on top of
 * them.
 */
static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
        struct pci_controller *hose = pci_bus_to_host(bus);
        resource_size_t offset;
        struct resource *res, *pres;
        int i;

        pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));

        /* Check for IO */
        if (!(hose->io_resource.flags & IORESOURCE_IO))
                goto no_io;
        offset = (unsigned long)hose->io_base_virt - _IO_BASE;
        res = kzalloc(sizeof(struct resource), GFP_KERNEL);
        BUG_ON(res == NULL);
        res->name = "Legacy IO";
        res->flags = IORESOURCE_IO;
        res->start = offset;
        res->end = (offset + 0xfff) & 0xfffffffful;
        pr_debug("Candidate legacy IO: %pR\n", res);
        if (request_resource(&hose->io_resource, res)) {
                /* Overlap with an existing allocation: free our candidate */
                printk(KERN_DEBUG
                       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
                       pci_domain_nr(bus), bus->number, res);
                kfree(res);
        }

 no_io:
        /* Check for memory */
        offset = hose->pci_mem_offset;
        pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
        for (i = 0; i < 3; i++) {
                pres = &hose->mem_resources[i];
                if (!(pres->flags & IORESOURCE_MEM))
                        continue;
                pr_debug("hose mem res: %pR\n", pres);
                /* Does this window cover the whole VGA range? */
                if ((pres->start - offset) <= 0xa0000 &&
                    (pres->end - offset) >= 0xbffff)
                        break;
        }
        if (i >= 3)
                return;
        res = kzalloc(sizeof(struct resource), GFP_KERNEL);
        BUG_ON(res == NULL);
        res->name = "Legacy VGA memory";
        res->flags = IORESOURCE_MEM;
        res->start = 0xa0000 + offset;
        res->end = 0xbffff + offset;
        pr_debug("Candidate VGA memory: %pR\n", res);
        if (request_resource(pres, res)) {
                printk(KERN_DEBUG
                       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
                       pci_domain_nr(bus), bus->number, res);
                kfree(res);
        }
}
/* Allocate and assign PCI resources on every root bus at boot.
 * Runs the firmware-assigned allocation passes first, then — unless
 * the platform asked us to trust the firmware completely — reserves
 * the legacy windows and assigns whatever was left unassigned.
 */
void __init pcibios_resource_survey(void)
{
	struct pci_bus *b;

	/* Allocate and assign resources */
	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_allocate_bus_resources(b);
	pcibios_allocate_resources(0);
	pcibios_allocate_resources(1);

	/* Before assigning unassigned resources, reserve the low IO
	 * area and the VGA memory area if they intersect the bus
	 * available resources, to avoid allocating things on top of
	 * them; then assign what the firmware left unassigned.  Both
	 * steps were guarded by the identical PCI_PROBE_ONLY test, so
	 * do them under a single condition.
	 */
	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		list_for_each_entry(b, &pci_root_buses, node)
			pcibios_reserve_legacy_regions(b);
		pr_debug("PCI: Assigning unassigned resources...\n");
		pci_assign_unassigned_resources();
	}

	/* Call machine dependent fixup */
	if (ppc_md.pcibios_fixup)
		ppc_md.pcibios_fixup();
}
#ifdef CONFIG_HOTPLUG
/* This is used by the PCI hotplug driver to allocate resource
* of newly plugged busses. We can try to consolidate with the
* rest of the code later, for now, keep it as-is as our main
* resource allocation function doesn't deal with sub-trees yet.
*/
/* Claim the already-programmed resources of every device on @bus and,
 * recursively, on all of its child buses.  Used by the hotplug path,
 * where resources were set up by firmware rather than by us.
 */
void pcibios_claim_one_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			/* Skip resources already claimed, unset, or
			 * starting at 0 (treated as unassigned here). */
			if (r->parent || !r->start || !r->flags)
				continue;
			pr_debug("PCI: Claiming %s: "
				 "Resource %d: %016llx..%016llx [%x]\n",
				 pci_name(dev), i,
				 (unsigned long long)r->start,
				 (unsigned long long)r->end,
				 (unsigned int)r->flags);
			pci_claim_resource(dev, i);
		}
	}

	/* Recurse into subordinate buses */
	list_for_each_entry(child_bus, &bus->children, node)
		pcibios_claim_one_bus(child_bus);
}
/* pcibios_finish_adding_to_bus
*
* This is to be called by the hotplug code after devices have been
* added to a bus, this include calling it for a PHB that is just
* being added
*/
/* Finish hotplug addition of devices to @bus: allocate and claim bus
 * and device resources, register the devices in the global lists
 * (proc/sysfs), and run the EEH fixups.  Also used for a PHB that has
 * just been added.
 */
void pcibios_finish_adding_to_bus(struct pci_bus *bus)
{
	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);

	/* Allocate bus and devices resources */
	pcibios_allocate_bus_resources(bus);
	pcibios_claim_one_bus(bus);

	/* Add new devices to global lists.  Register in proc, sysfs. */
	pci_bus_add_devices(bus);

	/* Fixup EEH */
	eeh_add_device_tree_late(bus);
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
#endif /* CONFIG_HOTPLUG */
/* Enable a PCI device's resources, giving the platform hook a chance
 * to veto the device first.  Returns 0 on success, -EINVAL if the
 * platform rejected the device, or the pci_enable_resources() result.
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	if (ppc_md.pcibios_enable_device_hook &&
	    ppc_md.pcibios_enable_device_hook(dev))
		return -EINVAL;

	return pci_enable_resources(dev, mask);
}
/* Offset between a hose's PCI I/O addresses and their CPU virtual
 * mapping (io_base_virt relative to the global _IO_BASE).
 */
resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
{
	resource_size_t off;

	off = (unsigned long) hose->io_base_virt - _IO_BASE;
	return off;
}
/* Hook a PHB's I/O and memory windows onto @resources for root-bus
 * creation, applying 32-bit workarounds when firmware left a window
 * unset.  Each resource is added with its CPU-to-PCI address offset.
 */
static void __devinit pcibios_setup_phb_resources(struct pci_controller *hose, struct list_head *resources)
{
	struct resource *res;
	int i;

	/* Hookup PHB IO resource */
	res = &hose->io_resource;
	if (!res->flags) {
		printk(KERN_WARNING "PCI: I/O resource not set for host"
		       " bridge %s (domain %d)\n",
		       hose->dn->full_name, hose->global_number);
#ifdef CONFIG_PPC32
		/* Workaround for lack of IO resource only on 32-bit */
		res->start = (unsigned long)hose->io_base_virt - isa_io_base;
		res->end = res->start + IO_SPACE_LIMIT;
		res->flags = IORESOURCE_IO;
#endif /* CONFIG_PPC32 */
	}
	pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n",
		 (unsigned long long)res->start,
		 (unsigned long long)res->end,
		 (unsigned long)res->flags);
	pci_add_resource_offset(resources, res, pcibios_io_space_offset(hose));

	/* Hookup PHB Memory resources */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		if (!res->flags) {
			/* Only the first memory window is mandatory */
			if (i > 0)
				continue;
			printk(KERN_ERR "PCI: Memory resource 0 not set for "
			       "host bridge %s (domain %d)\n",
			       hose->dn->full_name, hose->global_number);
#ifdef CONFIG_PPC32
			/* Workaround for lack of MEM resource only on 32-bit */
			res->start = hose->pci_mem_offset;
			res->end = (resource_size_t)-1LL;
			res->flags = IORESOURCE_MEM;
#endif /* CONFIG_PPC32 */
		}
		pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n", i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned long)res->flags);
		pci_add_resource_offset(resources, res, hose->pci_mem_offset);
	}

	pr_debug("PCI: PHB MEM offset = %016llx\n",
		 (unsigned long long)hose->pci_mem_offset);
	pr_debug("PCI: PHB IO offset = %08lx\n",
		 (unsigned long)hose->io_base_virt - _IO_BASE);
}
/*
* Null PCI config access functions, for the case when we can't
* find a hose.
*/
/* Null PCI config accessors used when no hose can be found: every
 * access simply reports "device not found".
 * NOTE(review): the NULL_PCI_OP macro is not expanded anywhere in the
 * visible part of this file — possibly vestigial; confirm before
 * removing.
 */
#define NULL_PCI_OP(rw, size, type) \
static int \
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
{ \
	return PCIBIOS_DEVICE_NOT_FOUND; \
}

/* Read stub: always fails with PCIBIOS_DEVICE_NOT_FOUND. */
static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

/* Write stub: always fails with PCIBIOS_DEVICE_NOT_FOUND. */
static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static struct pci_ops null_pci_ops =
{
	.read = null_read_config,
	.write = null_write_config,
};
/*
* These functions are used early on before PCI scanning is done
* and all of the pci_dev and pci_bus structures have been created.
*/
static struct pci_bus *
fake_pci_bus(struct pci_controller *hose, int busnr)
{
static struct pci_bus bus;
if (hose == 0) {
printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
}
bus.number = busnr;
bus.sysdata = hose;
bus.ops = hose? hose->ops: &null_pci_ops;
return &bus;
}
/* Generate early config accessors (early_read/write_config_byte/word/
 * dword) that route through a fake bus before PCI scanning is done.
 */
#define EARLY_PCI_OP(rw, size, type)					\
int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
			       int devfn, int offset, type value)	\
{									\
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
					    devfn, offset, value);	\
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)

extern int pci_bus_find_capability (struct pci_bus *bus, unsigned int devfn, int cap);

/* Early capability lookup, usable before the real bus exists. */
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
/* Return the device-tree node of the PHB behind @bus, with an extra
 * reference taken for the caller (release with of_node_put()).
 */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
{
	struct pci_controller *phb = bus->sysdata;

	return of_node_get(phb->dn);
}
/**
* pci_scan_phb - Given a pci_controller, setup and scan the PCI bus
* @hose: Pointer to the PCI host controller instance structure
*/
/* Given a pci_controller, setup and scan the PCI bus: wire up the
 * PHB's I/O and memory windows, create the root bus, probe children
 * (via the device tree or normal config-space scan), run platform
 * fixups and configure PCIe MPS settings.
 */
void __devinit pcibios_scan_phb(struct pci_controller *hose)
{
	LIST_HEAD(resources);
	struct pci_bus *bus;
	struct device_node *node = hose->dn;
	int mode;

	pr_debug("PCI: Scanning PHB %s\n",
		 node ? node->full_name : "<NO NAME>");

	/* Get some IO space for the new PHB */
	pcibios_setup_phb_io_space(hose);

	/* Wire up PHB bus resources */
	pcibios_setup_phb_resources(hose, &resources);

	/* Create an empty bus for the toplevel */
	bus = pci_create_root_bus(hose->parent, hose->first_busno,
				  hose->ops, hose, &resources);
	if (bus == NULL) {
		pr_err("Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		/* Creation failed: release the resource list we built */
		pci_free_resource_list(&resources);
		return;
	}
	bus->secondary = hose->first_busno;
	hose->bus = bus;

	/* Get probe mode and perform scan */
	mode = PCI_PROBE_NORMAL;
	if (node && ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);
	pr_debug("    probe mode: %d\n", mode);
	if (mode == PCI_PROBE_DEVTREE) {
		/* Devices come from the device tree, not config space */
		bus->subordinate = hose->last_busno;
		of_scan_bus(node, bus);
	}
	if (mode == PCI_PROBE_NORMAL)
		hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);

	/* Platform gets a chance to do some global fixups before
	 * we proceed to resource allocation
	 */
	if (ppc_md.pcibios_fixup_phb)
		ppc_md.pcibios_fixup_phb(hose);

	/* Configure PCI Express settings */
	if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
		struct pci_bus *child;

		list_for_each_entry(child, &bus->children, node) {
			struct pci_dev *self = child->self;

			/* Only buses behind a bridge have a "self" */
			if (!self)
				continue;
			pcie_bus_configure_settings(child, self->pcie_mpss);
		}
	}
}
/* Header fixup for Freescale/Motorola host bridges: when the SoC is
 * configured as a PCI agent its host resources must be hidden, so
 * clear all resource records of the matching root devices.
 */
static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
{
	int i, class = dev->class >> 8;
	/* When configured as agent, programing interface = 1 */
	int prog_if = dev->class & 0xf;

	/* Match only root-level PowerPC/other-bridge host devices in
	 * host (prog_if == 0) mode with a normal header. */
	if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
	     class == PCI_CLASS_BRIDGE_OTHER) &&
	    (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
	    (prog_if == 0) &&
	    (dev->bus->parent == NULL)) {
		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
			dev->resource[i].start = 0;
			dev->resource[i].end = 0;
			dev->resource[i].flags = 0;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
| gpl-2.0 |
CMyst/android_kernel_htc_msm8960 | arch/arm/mach-sa1100/generic.c | 4702 | 10504 | /*
* linux/arch/arm/mach-sa1100/generic.c
*
* Author: Nicolas Pitre
*
* Code common to all SA11x0 machines.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <video/sa1100fb.h>
#include <asm/div64.h>
#include <asm/mach/map.h>
#include <asm/mach/flash.h>
#include <asm/irq.h>
#include <asm/system_misc.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include "generic.h"
unsigned int reset_status;
EXPORT_SYMBOL(reset_status);
#define NR_FREQS 16

/*
 * CPU core-clock table for a 3.6864MHz crystal, indexed by the PPCR
 * frequency code.  Entries are in units of 100kHz.
 */
static const unsigned short cclk_frequency_100khz[NR_FREQS] = {
	 590,	/*  59.0 MHz */
	 737,	/*  73.7 MHz */
	 885,	/*  88.5 MHz */
	1032,	/* 103.2 MHz */
	1180,	/* 118.0 MHz */
	1327,	/* 132.7 MHz */
	1475,	/* 147.5 MHz */
	1622,	/* 162.2 MHz */
	1769,	/* 176.9 MHz */
	1917,	/* 191.7 MHz */
	2064,	/* 206.4 MHz */
	2212,	/* 221.2 MHz */
	2359,	/* 235.9 MHz */
	2507,	/* 250.7 MHz */
	2654,	/* 265.4 MHz */
	2802	/* 280.2 MHz */
};

/* Map @khz to the smallest PPCR code whose clock is at least that
 * fast (i.e. this rounds up!).  Requests above the fastest entry
 * yield NR_FREQS.
 */
unsigned int sa11x0_freq_to_ppcr(unsigned int khz)
{
	unsigned int want = khz / 100;
	int idx = 0;

	while (idx < NR_FREQS && cclk_frequency_100khz[idx] < want)
		idx++;

	return idx;
}

/* Convert a PPCR frequency code back to kHz; out-of-range codes
 * return 0.
 */
unsigned int sa11x0_ppcr_to_freq(unsigned int idx)
{
	return (idx < NR_FREQS) ? cclk_frequency_100khz[idx] * 100 : 0;
}
/* make sure that only the "userspace" governor is run -- anything else wouldn't make sense on
* this platform, anyway.
*/
/* cpufreq verify hook: clamp the policy to hardware limits and make
 * sure at least one table frequency lies inside [min, max] by raising
 * policy->max to the rounded-up frequency of policy->min if needed.
 * Only CPU 0 is valid on SA11x0.
 */
int sa11x0_verify_speed(struct cpufreq_policy *policy)
{
	unsigned int tmp;

	if (policy->cpu)
		return -EINVAL;

	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);

	/* make sure that at least one frequency is within the policy */
	tmp = cclk_frequency_100khz[sa11x0_freq_to_ppcr(policy->min)] * 100;
	if (tmp > policy->max)
		policy->max = tmp;

	/* re-clamp after possibly widening max */
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);

	return 0;
}
/* Return the current core clock in kHz by decoding the low nibble of
 * the PPCR register; 0 for any CPU other than 0.
 */
unsigned int sa11x0_getspeed(unsigned int cpu)
{
	if (cpu)
		return 0;
	return cclk_frequency_100khz[PPCR & 0xf] * 100;
}
/*
* Default power-off for SA1100
*/
/* Default power-off for SA1100: enter sleep mode with the oscillator
 * disabled, waking only on GPIO0.  The register write order here is
 * significant — PMCR must be written last.
 */
static void sa1100_power_off(void)
{
	/* let pending console/LED activity settle */
	mdelay(100);
	local_irq_disable();
	/* disable internal oscillator, float CS lines */
	PCFR = (PCFR_OPDE | PCFR_FP | PCFR_FS);
	/* enable wake-up on GPIO0 (Assabet...) */
	PWER = GFER = GRER = 1;
	/*
	 * set scratchpad to zero, just in case it is used as a
	 * restart address by the bootloader.
	 */
	PSPR = 0;
	/* enter sleep mode */
	PMCR = PMCR_SF;
}
/* Machine restart: 's' requests a soft restart (jump to ROM at 0),
 * anything else uses the on-chip software reset.  @cmd is unused.
 */
void sa11x0_restart(char mode, const char *cmd)
{
	if (mode != 's') {
		/* Use on-chip reset capability */
		RSRR = RSRR_SWR;
		return;
	}

	/* Jump into ROM at address 0 */
	soft_restart(0);
}
/* Attach @data as platform data and register @dev, logging (but not
 * propagating) any registration failure.
 */
static void sa11x0_register_device(struct platform_device *dev, void *data)
{
	int ret;

	dev->dev.platform_data = data;
	ret = platform_device_register(dev);
	if (ret != 0)
		printk(KERN_ERR "Unable to register device %s: %d\n",
		       dev->name, ret);
}
/* --- On-chip platform devices: UDC, UARTs 1/3, and the MCP --- */

/* USB device controller (Ser0) */
static struct resource sa11x0udc_resources[] = {
	[0] = DEFINE_RES_MEM(__PREG(Ser0UDCCR), SZ_64K),
	[1] = DEFINE_RES_IRQ(IRQ_Ser0UDC),
};

static u64 sa11x0udc_dma_mask = 0xffffffffUL;	/* 32-bit DMA only */

static struct platform_device sa11x0udc_device = {
	.name		= "sa11x0-udc",
	.id		= -1,
	.dev		= {
		.dma_mask = &sa11x0udc_dma_mask,
		.coherent_dma_mask = 0xffffffff,
	},
	.num_resources	= ARRAY_SIZE(sa11x0udc_resources),
	.resource	= sa11x0udc_resources,
};

/* Serial port 1 */
static struct resource sa11x0uart1_resources[] = {
	[0] = DEFINE_RES_MEM(__PREG(Ser1UTCR0), SZ_64K),
	[1] = DEFINE_RES_IRQ(IRQ_Ser1UART),
};

static struct platform_device sa11x0uart1_device = {
	.name		= "sa11x0-uart",
	.id		= 1,
	.num_resources	= ARRAY_SIZE(sa11x0uart1_resources),
	.resource	= sa11x0uart1_resources,
};

/* Serial port 3 */
static struct resource sa11x0uart3_resources[] = {
	[0] = DEFINE_RES_MEM(__PREG(Ser3UTCR0), SZ_64K),
	[1] = DEFINE_RES_IRQ(IRQ_Ser3UART),
};

static struct platform_device sa11x0uart3_device = {
	.name		= "sa11x0-uart",
	.id		= 3,
	.num_resources	= ARRAY_SIZE(sa11x0uart3_resources),
	.resource	= sa11x0uart3_resources,
};

/* Multimedia communications port (Ser4) */
static struct resource sa11x0mcp_resources[] = {
	[0] = DEFINE_RES_MEM(__PREG(Ser4MCCR0), SZ_64K),
	[1] = DEFINE_RES_MEM(__PREG(Ser4MCCR1), 4),
	[2] = DEFINE_RES_IRQ(IRQ_Ser4MCP),
};

static u64 sa11x0mcp_dma_mask = 0xffffffffUL;

static struct platform_device sa11x0mcp_device = {
	.name		= "sa11x0-mcp",
	.id		= -1,
	.dev		= {
		.dma_mask = &sa11x0mcp_dma_mask,
		.coherent_dma_mask = 0xffffffff,
	},
	.num_resources	= ARRAY_SIZE(sa11x0mcp_resources),
	.resource	= sa11x0mcp_resources,
};

/* Route the PPC pins to the MCP: TXD4/SCLK/SFRM as driven outputs,
 * RXD4 as input, with sleep-mode directions set to match.
 */
void __init sa11x0_ppc_configure_mcp(void)
{
	/* Setup the PPC unit for the MCP */
	PPDR &= ~PPC_RXD4;
	PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
	PSDR |= PPC_RXD4;
	PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
	PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
}

/* Register the MCP device with board-specific platform data. */
void sa11x0_register_mcp(struct mcp_plat_data *data)
{
	sa11x0_register_device(&sa11x0mcp_device, data);
}
/* --- SSP, framebuffer, PCMCIA, MTD flash and IrDA devices --- */

/* Synchronous serial port */
static struct resource sa11x0ssp_resources[] = {
	[0] = DEFINE_RES_MEM(0x80070000, SZ_64K),
	[1] = DEFINE_RES_IRQ(IRQ_Ser4SSP),
};

static u64 sa11x0ssp_dma_mask = 0xffffffffUL;

static struct platform_device sa11x0ssp_device = {
	.name		= "sa11x0-ssp",
	.id		= -1,
	.dev		= {
		.dma_mask = &sa11x0ssp_dma_mask,
		.coherent_dma_mask = 0xffffffff,
	},
	.num_resources	= ARRAY_SIZE(sa11x0ssp_resources),
	.resource	= sa11x0ssp_resources,
};

/* LCD controller */
static struct resource sa11x0fb_resources[] = {
	[0] = DEFINE_RES_MEM(0xb0100000, SZ_64K),
	[1] = DEFINE_RES_IRQ(IRQ_LCD),
};

static struct platform_device sa11x0fb_device = {
	.name		= "sa11x0-fb",
	.id		= -1,
	.dev		= {
		.coherent_dma_mask = 0xffffffff,
	},
	.num_resources	= ARRAY_SIZE(sa11x0fb_resources),
	.resource	= sa11x0fb_resources,
};

/* Register the LCD device with board-specific panel info. */
void sa11x0_register_lcd(struct sa1100fb_mach_info *inf)
{
	sa11x0_register_device(&sa11x0fb_device, inf);
}

static struct platform_device sa11x0pcmcia_device = {
	.name		= "sa11x0-pcmcia",
	.id		= -1,
};

static struct platform_device sa11x0mtd_device = {
	.name		= "sa1100-mtd",
	.id		= -1,
};

/* Register the MTD flash device.  @res/@nr describe the board's flash
 * banks; the flash platform data name is forced to "sa1100".
 */
void sa11x0_register_mtd(struct flash_platform_data *flash,
			 struct resource *res, int nr)
{
	flash->name = "sa1100";
	sa11x0mtd_device.resource = res;
	sa11x0mtd_device.num_resources = nr;
	sa11x0_register_device(&sa11x0mtd_device, flash);
}

/* IrDA on serial port 2 (UART + HSSP register windows) */
static struct resource sa11x0ir_resources[] = {
	DEFINE_RES_MEM(__PREG(Ser2UTCR0), 0x24),
	DEFINE_RES_MEM(__PREG(Ser2HSCR0), 0x1c),
	DEFINE_RES_MEM(__PREG(Ser2HSCR2), 0x04),
	DEFINE_RES_IRQ(IRQ_Ser2ICP),
};

static struct platform_device sa11x0ir_device = {
	.name		= "sa11x0-ir",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(sa11x0ir_resources),
	.resource	= sa11x0ir_resources,
};

/* Register the IrDA device with board-specific platform data. */
void sa11x0_register_irda(struct irda_platform_data *irda)
{
	sa11x0_register_device(&sa11x0ir_device, irda);
}
/* --- RTC, DMA controller, and the common device registration --- */

static struct resource sa1100_rtc_resources[] = {
	DEFINE_RES_MEM(0x90010000, 0x40),
	DEFINE_RES_IRQ_NAMED(IRQ_RTC1Hz, "rtc 1Hz"),
	DEFINE_RES_IRQ_NAMED(IRQ_RTCAlrm, "rtc alarm"),
};

static struct platform_device sa11x0rtc_device = {
	.name		= "sa1100-rtc",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(sa1100_rtc_resources),
	.resource	= sa1100_rtc_resources,
};

/* Six-channel DMA controller */
static struct resource sa11x0dma_resources[] = {
	DEFINE_RES_MEM(DMA_PHYS, DMA_SIZE),
	DEFINE_RES_IRQ(IRQ_DMA0),
	DEFINE_RES_IRQ(IRQ_DMA1),
	DEFINE_RES_IRQ(IRQ_DMA2),
	DEFINE_RES_IRQ(IRQ_DMA3),
	DEFINE_RES_IRQ(IRQ_DMA4),
	DEFINE_RES_IRQ(IRQ_DMA5),
};

static u64 sa11x0dma_dma_mask = DMA_BIT_MASK(32);

static struct platform_device sa11x0dma_device = {
	.name		= "sa11x0-dma",
	.id		= -1,
	.dev		= {
		.dma_mask = &sa11x0dma_dma_mask,
		.coherent_dma_mask = 0xffffffff,
	},
	.num_resources	= ARRAY_SIZE(sa11x0dma_resources),
	.resource	= sa11x0dma_resources,
};

/* Devices present on every SA11x0 machine; the rest (MCP, LCD, MTD,
 * IrDA) are registered by board code with board-specific data.
 */
static struct platform_device *sa11x0_devices[] __initdata = {
	&sa11x0udc_device,
	&sa11x0uart1_device,
	&sa11x0uart3_device,
	&sa11x0ssp_device,
	&sa11x0pcmcia_device,
	&sa11x0rtc_device,
	&sa11x0dma_device,
};

/* Install the default power-off handler and register the common
 * on-chip devices.
 */
static int __init sa1100_init(void)
{
	pm_power_off = sa1100_power_off;
	return platform_add_devices(sa11x0_devices, ARRAY_SIZE(sa11x0_devices));
}

arch_initcall(sa1100_init);
/*
* Common I/O mapping:
*
* Typically, static virtual address mappings are as follow:
*
* 0xf0000000-0xf3ffffff: miscellaneous stuff (CPLDs, etc.)
* 0xf4000000-0xf4ffffff: SA-1111
* 0xf5000000-0xf5ffffff: reserved (used by cache flushing area)
* 0xf6000000-0xfffeffff: reserved (internal SA1100 IO defined above)
* 0xffff0000-0xffff0fff: SA1100 exception vectors
* 0xffff2000-0xffff2fff: Minicache copy_user_page area
*
* Below 0xe8000000 is reserved for vm allocation.
*
* The machine specific code must provide the extra mapping beside the
* default mapping provided here.
*/
/* Static virtual mappings for the SA1100's internal register blocks;
 * see the layout comment above.
 */
static struct map_desc standard_io_desc[] __initdata = {
	{	/* PCM */
		.virtual	= 0xf8000000,
		.pfn		= __phys_to_pfn(0x80000000),
		.length		= 0x00100000,
		.type		= MT_DEVICE
	}, {	/* SCM */
		.virtual	= 0xfa000000,
		.pfn		= __phys_to_pfn(0x90000000),
		.length		= 0x00100000,
		.type		= MT_DEVICE
	}, {	/* MER */
		.virtual	= 0xfc000000,
		.pfn		= __phys_to_pfn(0xa0000000),
		.length		= 0x00100000,
		.type		= MT_DEVICE
	}, {	/* LCD + DMA */
		.virtual	= 0xfe000000,
		.pfn		= __phys_to_pfn(0xb0000000),
		.length		= 0x00200000,
		.type		= MT_DEVICE
	},
};

/* Install the common SA1100 static I/O mappings. */
void __init sa1100_map_io(void)
{
	iotable_init(standard_io_desc, ARRAY_SIZE(standard_io_desc));
}
/*
* Disable the memory bus request/grant signals on the SA1110 to
* ensure that we don't receive spurious memory requests. We set
* the MBGNT signal false to ensure the SA1111 doesn't own the
* SDRAM bus.
*/
/* Disable the SA1110 memory-bus request/grant signals: drive MBGNT
 * low as a plain GPIO output so the SA1111 never owns the SDRAM bus.
 */
void sa1110_mb_disable(void)
{
	unsigned long flags;

	local_irq_save(flags);

	PGSR &= ~GPIO_MBGNT;		/* MBGNT low across sleep too */
	GPCR = GPIO_MBGNT;		/* drive MBGNT low now */
	GPDR = (GPDR & ~GPIO_MBREQ) | GPIO_MBGNT;
	GAFR &= ~(GPIO_MBGNT | GPIO_MBREQ);	/* plain GPIO, not alt fn */

	local_irq_restore(flags);
}

/*
 * If the system is going to use the SA-1111 DMA engines, set up
 * the memory bus request/grant pins.
 */
void sa1110_mb_enable(void)
{
	unsigned long flags;

	local_irq_save(flags);

	PGSR &= ~GPIO_MBGNT;
	GPCR = GPIO_MBGNT;
	GPDR = (GPDR & ~GPIO_MBREQ) | GPIO_MBGNT;
	GAFR |= (GPIO_MBGNT | GPIO_MBREQ);	/* hand pins to alt fn */
	TUCR |= TUCR_MR;

	local_irq_restore(flags);
}
| gpl-2.0 |
filippz/kernel-adaptation-n950-n9 | arch/arm/mach-netx/nxdb500.c | 4958 | 4728 | /*
* arch/arm/mach-netx/nxdb500.c
*
* Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mtd/plat-ram.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/hardware/vic.h>
#include <mach/netx-regs.h>
#include <mach/eth.h>
#include "generic.h"
#include "fb.h"
/* 240x320 QVGA panel timings for the nxdb500's CLCD controller. */
static struct clcd_panel qvga = {
	.mode		= {
		.name		= "QVGA",
		.refresh	= 60,
		.xres		= 240,
		.yres		= 320,
		.pixclock	= 187617,
		.left_margin	= 6,
		.right_margin	= 26,
		.upper_margin	= 0,
		.lower_margin	= 6,
		.hsync_len	= 6,
		.vsync_len	= 1,
		.sync		= 0,
		.vmode		= FB_VMODE_NONINTERLACED,
	},
	.width		= -1,	/* physical size unknown */
	.height		= -1,
	.tim2		= 16,
	.cntl		= CNTL_LCDTFT | CNTL_BGR,
	.bpp		= 16,
	.grayscale	= 0,
};

/* var-check hook: force 5-bit green (RGB555) before the generic
 * clcdfb check.
 */
static inline int nxdb500_check(struct clcd_fb *fb, struct fb_var_screeninfo *var)
{
	var->green.length = 5;
	var->green.msb_right = 0;

	return clcdfb_check(fb, var);
}

/* Board setup hook: fix the pixel format, unlock the ASIC control
 * registers and switch PIO0 on (backlight/display enable per board
 * wiring — not confirmed from here), then do the generic netX CLCD
 * setup.
 */
static int nxdb500_clcd_setup(struct clcd_fb *fb)
{
	unsigned int val;

	fb->fb.var.green.length = 5;
	fb->fb.var.green.msb_right = 0;

	/* enable asic control */
	val = readl(NETX_SYSTEM_IOC_ACCESS_KEY);
	writel(val, NETX_SYSTEM_IOC_ACCESS_KEY);	/* read-back unlock sequence */

	writel(3, NETX_SYSTEM_IOC_CR);

	/* set PIO0 high and make it an output */
	val = readl(NETX_PIO_OUTPIO);
	writel(val | 1, NETX_PIO_OUTPIO);

	val = readl(NETX_PIO_OEPIO);
	writel(val | 1, NETX_PIO_OEPIO);
	return netx_clcd_setup(fb);
}

static struct clcd_board clcd_data = {
	.name		= "netX",
	.check		= nxdb500_check,
	.decode		= clcdfb_decode,
	.enable		= netx_clcd_enable,
	.setup		= nxdb500_clcd_setup,
	.mmap		= netx_clcd_mmap,
	.remove		= netx_clcd_remove,
};
/* First on-board Ethernet port, driven by netX xc unit 0. */
static struct netxeth_platform_data eth0_platform_data = {
	.xcno = 0,
};

static struct platform_device netx_eth0_device = {
	.name		= "netx-eth",
	.id		= 0,
	.num_resources	= 0,
	.resource	= NULL,
	.dev		= {
		/* was corrupted to the HTML entity "ð0_..."; the
		 * intended token is the address-of expression below */
		.platform_data = &eth0_platform_data,
	}
};
/* Second on-board Ethernet port, driven by netX xc unit 1. */
static struct netxeth_platform_data eth1_platform_data = {
	.xcno = 1,
};

static struct platform_device netx_eth1_device = {
	.name		= "netx-eth",
	.id		= 1,
	.num_resources	= 0,
	.resource	= NULL,
	.dev		= {
		/* was corrupted to the HTML entity "ð1_..."; the
		 * intended token is the address-of expression below */
		.platform_data = &eth1_platform_data,
	}
};
/* --- The three on-chip UARTs (register blocks at 0x00100Axx) --- */

static struct resource netx_uart0_resources[] = {
	[0] = {
		.start	= 0x00100A00,
		.end	= 0x00100A3F,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= (NETX_IRQ_UART0),
		.end	= (NETX_IRQ_UART0),
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device netx_uart0_device = {
	.name		= "netx-uart",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(netx_uart0_resources),
	.resource	= netx_uart0_resources,
};

static struct resource netx_uart1_resources[] = {
	[0] = {
		.start	= 0x00100A40,
		.end	= 0x00100A7F,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= (NETX_IRQ_UART1),
		.end	= (NETX_IRQ_UART1),
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device netx_uart1_device = {
	.name		= "netx-uart",
	.id		= 1,
	.num_resources	= ARRAY_SIZE(netx_uart1_resources),
	.resource	= netx_uart1_resources,
};

static struct resource netx_uart2_resources[] = {
	[0] = {
		.start	= 0x00100A80,
		.end	= 0x00100ABF,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= (NETX_IRQ_UART2),
		.end	= (NETX_IRQ_UART2),
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device netx_uart2_device = {
	.name		= "netx-uart",
	.id		= 2,
	.num_resources	= ARRAY_SIZE(netx_uart2_resources),
	.resource	= netx_uart2_resources,
};

/* All devices registered at board init */
static struct platform_device *devices[] __initdata = {
	&netx_eth0_device,
	&netx_eth1_device,
	&netx_uart0_device,
	&netx_uart1_device,
	&netx_uart2_device,
};
/* Board init: bring up the framebuffer with the QVGA panel and
 * register the Ethernet and UART platform devices.
 */
static void __init nxdb500_init(void)
{
	netx_fb_init(&clcd_data, &qvga);
	platform_add_devices(devices, ARRAY_SIZE(devices));
}

/* Machine record for the Hilscher nxdb500 board */
MACHINE_START(NXDB500, "Hilscher nxdb500")
	.atag_offset	= 0x100,
	.map_io		= netx_map_io,
	.init_irq	= netx_init_irq,
	.handle_irq	= vic_handle_irq,
	.timer		= &netx_timer,
	.init_machine	= nxdb500_init,
	.restart	= netx_restart,
MACHINE_END
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.